2024-12-09 05:12:39,435 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-09 05:12:39,452 main DEBUG Took 0.014578 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 05:12:39,452 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 05:12:39,453 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 05:12:39,454 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 05:12:39,455 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,465 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 05:12:39,481 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,483 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,484 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,484 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,485 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,485 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,487 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,487 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,488 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,488 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,490 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,491 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,491 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 05:12:39,492 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,493 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,493 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,494 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 05:12:39,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,497 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 05:12:39,498 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 05:12:39,499 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 05:12:39,502 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 05:12:39,502 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 05:12:39,503 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 05:12:39,504 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 05:12:39,515 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 05:12:39,522 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 05:12:39,524 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 05:12:39,525 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 05:12:39,525 main DEBUG createAppenders(={Console}) 2024-12-09 05:12:39,526 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized 2024-12-09 05:12:39,527 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-09 05:12:39,527 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK. 2024-12-09 05:12:39,528 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 05:12:39,528 main DEBUG OutputStream closed 2024-12-09 05:12:39,529 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 05:12:39,529 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 05:12:39,529 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK 2024-12-09 05:12:39,651 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 05:12:39,654 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 05:12:39,656 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 05:12:39,657 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 05:12:39,658 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 05:12:39,658 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 05:12:39,658 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 05:12:39,659 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 05:12:39,659 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 05:12:39,660 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 05:12:39,660 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 05:12:39,661 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 05:12:39,661 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 05:12:39,662 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 05:12:39,662 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 05:12:39,663 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 05:12:39,663 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 05:12:39,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 05:12:39,670 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 05:12:39,670 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-09 05:12:39,671 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 05:12:39,672 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-09T05:12:40,012 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794 2024-12-09 05:12:40,017 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 05:12:40,017 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-09T05:12:40,032 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-09T05:12:40,098 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=278, ProcessCount=11, AvailableMemoryMB=8530 2024-12-09T05:12:40,103 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:12:40,109 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64, deleteOnExit=true 2024-12-09T05:12:40,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T05:12:40,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/test.cache.data in system properties and HBase conf 2024-12-09T05:12:40,111 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:12:40,111 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:12:40,112 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:12:40,113 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:12:40,113 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T05:12:40,237 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T05:12:40,360 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T05:12:40,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:12:40,366 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:12:40,366 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:12:40,367 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:12:40,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:12:40,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:12:40,369 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:12:40,369 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:12:40,370 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:12:40,371 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:12:40,371 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:12:40,372 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:12:40,372 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:12:40,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:12:41,055 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:12:41,499 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T05:12:41,610 INFO [Time-limited test {}] log.Log(170): Logging initialized @3163ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T05:12:41,712 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:12:41,802 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:12:41,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:12:41,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:12:41,834 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:12:41,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:12:41,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a602904{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:12:41,860 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a2a2c8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:12:42,136 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@dbae14b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/java.io.tmpdir/jetty-localhost-45533-hadoop-hdfs-3_4_1-tests_jar-_-any-9632134140643846550/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:12:42,149 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d0329cc{HTTP/1.1, (http/1.1)}{localhost:45533} 2024-12-09T05:12:42,150 INFO [Time-limited test {}] server.Server(415): Started @3703ms 2024-12-09T05:12:42,184 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:12:42,618 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:12:42,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:12:42,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:12:42,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:12:42,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:12:42,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6eface5d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:12:42,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@583669e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:12:42,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1707a3cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/java.io.tmpdir/jetty-localhost-43141-hadoop-hdfs-3_4_1-tests_jar-_-any-6629198068283489409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:12:42,755 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@359dcd6a{HTTP/1.1, (http/1.1)}{localhost:43141} 2024-12-09T05:12:42,756 INFO [Time-limited test {}] server.Server(415): Started @4309ms 2024-12-09T05:12:42,816 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:12:42,978 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:12:42,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:12:43,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:12:43,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:12:43,005 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:12:43,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ab144a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:12:43,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40635cd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:12:43,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77880d53{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/java.io.tmpdir/jetty-localhost-36985-hadoop-hdfs-3_4_1-tests_jar-_-any-16029331609855323585/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:12:43,197 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7469b8b8{HTTP/1.1, (http/1.1)}{localhost:36985} 2024-12-09T05:12:43,198 INFO [Time-limited test {}] server.Server(415): Started @4751ms 2024-12-09T05:12:43,201 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T05:12:43,402 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data1/current/BP-1669704492-172.17.0.2-1733721161171/current, will proceed with Du for space computation calculation, 2024-12-09T05:12:43,402 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data3/current/BP-1669704492-172.17.0.2-1733721161171/current, will proceed with Du for space computation calculation, 2024-12-09T05:12:43,404 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data2/current/BP-1669704492-172.17.0.2-1733721161171/current, will proceed with Du for space computation calculation, 2024-12-09T05:12:43,412 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data4/current/BP-1669704492-172.17.0.2-1733721161171/current, will proceed with Du for space computation calculation, 2024-12-09T05:12:43,478 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:12:43,484 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:12:43,570 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafc7278c44bb6212 with lease ID 0x7c26281da0fb8342: Processing first storage report for DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148 from datanode DatanodeRegistration(127.0.0.1:33923, datanodeUuid=0828d983-17ca-4fea-a29b-64945bbde071, infoPort=42655, infoSecurePort=0, ipcPort=40603, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171) 2024-12-09T05:12:43,571 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafc7278c44bb6212 with lease ID 0x7c26281da0fb8342: from storage DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148 node DatanodeRegistration(127.0.0.1:33923, datanodeUuid=0828d983-17ca-4fea-a29b-64945bbde071, infoPort=42655, infoSecurePort=0, ipcPort=40603, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-09T05:12:43,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73bd128a8172ec79 with lease ID 0x7c26281da0fb8341: Processing first storage report for DS-cdec4faf-a1a2-4c41-8b42-287093eaef39 from datanode DatanodeRegistration(127.0.0.1:42421, datanodeUuid=b5f42ee2-9770-4b5b-b8e6-3795094a5e7b, infoPort=35487, infoSecurePort=0, ipcPort=44131, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171) 2024-12-09T05:12:43,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73bd128a8172ec79 with lease ID 0x7c26281da0fb8341: from storage DS-cdec4faf-a1a2-4c41-8b42-287093eaef39 node DatanodeRegistration(127.0.0.1:42421, datanodeUuid=b5f42ee2-9770-4b5b-b8e6-3795094a5e7b, infoPort=35487, infoSecurePort=0, ipcPort=44131, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:12:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafc7278c44bb6212 with lease ID 0x7c26281da0fb8342: Processing first storage report for DS-4eef3769-90dd-4d10-83da-a3fca982502f from datanode DatanodeRegistration(127.0.0.1:33923, datanodeUuid=0828d983-17ca-4fea-a29b-64945bbde071, infoPort=42655, infoSecurePort=0, ipcPort=40603, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171) 2024-12-09T05:12:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafc7278c44bb6212 with lease ID 0x7c26281da0fb8342: from storage DS-4eef3769-90dd-4d10-83da-a3fca982502f node DatanodeRegistration(127.0.0.1:33923, datanodeUuid=0828d983-17ca-4fea-a29b-64945bbde071, infoPort=42655, infoSecurePort=0, ipcPort=40603, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:12:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73bd128a8172ec79 with lease ID 0x7c26281da0fb8341: Processing first storage report for DS-faedb5b0-3e73-4747-9680-57b0ba0122ea from datanode DatanodeRegistration(127.0.0.1:42421, datanodeUuid=b5f42ee2-9770-4b5b-b8e6-3795094a5e7b, infoPort=35487, infoSecurePort=0, ipcPort=44131, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171) 2024-12-09T05:12:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x73bd128a8172ec79 with lease ID 0x7c26281da0fb8341: from storage DS-faedb5b0-3e73-4747-9680-57b0ba0122ea node DatanodeRegistration(127.0.0.1:42421, datanodeUuid=b5f42ee2-9770-4b5b-b8e6-3795094a5e7b, infoPort=35487, infoSecurePort=0, ipcPort=44131, storageInfo=lv=-57;cid=testClusterID;nsid=1482375018;c=1733721161171), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:12:43,685 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794 2024-12-09T05:12:43,784 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/zookeeper_0, clientPort=52383, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:12:43,795 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52383 2024-12-09T05:12:43,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:43,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:44,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:12:44,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:12:44,500 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f with version=8 2024-12-09T05:12:44,500 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase-staging 2024-12-09T05:12:44,641 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T05:12:44,932 INFO [Time-limited test {}] client.ConnectionUtils(129): master/41a709354867:0 server-side Connection retries=45 2024-12-09T05:12:44,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:12:44,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:12:44,953 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:12:44,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:12:44,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:12:45,119 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:12:45,192 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T05:12:45,202 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T05:12:45,206 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:12:45,238 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28578 (auto-detected) 2024-12-09T05:12:45,239 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T05:12:45,263 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33145 2024-12-09T05:12:45,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:45,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:45,290 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33145 connecting to ZooKeeper ensemble=127.0.0.1:52383 2024-12-09T05:12:45,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331450x0, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:12:45,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33145-0x100753140d70000 connected 2024-12-09T05:12:45,387 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:12:45,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:12:45,394 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:12:45,400 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33145 2024-12-09T05:12:45,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33145 2024-12-09T05:12:45,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33145 2024-12-09T05:12:45,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33145 2024-12-09T05:12:45,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33145 2024-12-09T05:12:45,414 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f, hbase.cluster.distributed=false 2024-12-09T05:12:45,482 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:12:45,483 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:12:45,483 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:12:45,483 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:12:45,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:12:45,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:12:45,487 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:12:45,490 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:12:45,493 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41921 2024-12-09T05:12:45,496 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:12:45,503 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:12:45,505 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:45,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:45,517 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41921 connecting to ZooKeeper ensemble=127.0.0.1:52383 2024-12-09T05:12:45,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419210x0, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:12:45,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419210x0, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:12:45,524 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419210x0, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:12:45,525 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419210x0, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:12:45,528 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41921-0x100753140d70001 connected 2024-12-09T05:12:45,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41921 2024-12-09T05:12:45,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41921 2024-12-09T05:12:45,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41921 2024-12-09T05:12:45,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41921 2024-12-09T05:12:45,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41921 2024-12-09T05:12:45,539 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/41a709354867,33145,1733721164634 2024-12-09T05:12:45,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:12:45,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:12:45,548 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/41a709354867,33145,1733721164634 2024-12-09T05:12:45,555 DEBUG [M:0;41a709354867:33145 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;41a709354867:33145 2024-12-09T05:12:45,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:12:45,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 
2024-12-09T05:12:45,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:45,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:45,570 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:12:45,571 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/41a709354867,33145,1733721164634 from backup master directory 2024-12-09T05:12:45,571 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:12:45,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/41a709354867,33145,1733721164634 2024-12-09T05:12:45,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:12:45,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:12:45,575 WARN [master/41a709354867:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T05:12:45,575 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=41a709354867,33145,1733721164634 2024-12-09T05:12:45,578 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T05:12:45,579 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T05:12:45,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:12:45,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:12:45,667 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase.id with ID: 3448f36c-9c91-47e2-90b8-248ec102328d 2024-12-09T05:12:45,711 INFO [master/41a709354867:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:45,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:45,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:45,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:12:45,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:12:45,775 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:12:45,777 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:12:45,784 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:12:45,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:12:45,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:12:45,846 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store 2024-12-09T05:12:45,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:12:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:12:45,875 INFO [master/41a709354867:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-09T05:12:45,876 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:45,877 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:12:45,877 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:12:45,878 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:12:45,878 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:12:45,878 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:12:45,878 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:12:45,878 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:12:45,880 WARN [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/.initializing 2024-12-09T05:12:45,881 DEBUG [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/WALs/41a709354867,33145,1733721164634 2024-12-09T05:12:45,897 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C33145%2C1733721164634, suffix=, logDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/WALs/41a709354867,33145,1733721164634, archiveDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/oldWALs, maxLogs=10 2024-12-09T05:12:45,908 INFO [master/41a709354867:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C33145%2C1733721164634.1733721165905 2024-12-09T05:12:45,909 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 2024-12-09T05:12:45,909 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 
2024-12-09T05:12:45,932 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/WALs/41a709354867,33145,1733721164634/41a709354867%2C33145%2C1733721164634.1733721165905 2024-12-09T05:12:45,941 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35487:35487),(127.0.0.1/127.0.0.1:42655:42655)] 2024-12-09T05:12:45,941 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:12:45,942 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:45,945 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:45,947 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:45,984 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:12:46,012 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:46,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:12:46,020 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:12:46,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:12:46,024 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,025 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:12:46,025 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:12:46,028 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:12:46,032 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,044 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
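The FlushLargeStoresPolicy fallback above divides the region's memstore flush size by the number of column families: with the flushSize=134217728 (128 MB) injected earlier in this log and the four families of master:store (info, proc, rs, state), that gives the 32.0 M lower bound it reports. A small sketch of that arithmetic:

    public class FlushLowerBoundFallback {
        public static void main(String[] args) {
            // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
            // per-family lower bound = memstore flush size / number of column families.
            long memStoreFlushSize = 134_217_728L;  // 128 MB, "Injected flushSize=..." above
            int families = 4;                       // master:store: info, proc, rs, state
            long lowerBound = memStoreFlushSize / families;
            System.out.println(lowerBound);         // 33554432, the 32.0 M reported above
        }
    }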
2024-12-09T05:12:46,047 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:12:46,051 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:12:46,052 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753513, jitterRate=-0.041859760880470276}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:12:46,057 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:12:46,058 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:12:46,092 DEBUG [master/41a709354867:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@140c117b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:12:46,128 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-09T05:12:46,139 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:12:46,139 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:12:46,142 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T05:12:46,143 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T05:12:46,148 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-09T05:12:46,148 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:12:46,178 INFO [master/41a709354867:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
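The SteppingSplitPolicy line above (and the two similar lines later in this log) prints desiredMaxFileSize together with a jitterRate. All three pairs are consistent with a base split size of 786432 bytes plus a truncated jitter term, desiredMaxFileSize = base + (long)(base * jitterRate); the base is inferred from the logged numbers, not read from the configuration. A sketch reproducing the three values:

    public class SplitSizeJitterSketch {
        public static void main(String[] args) {
            long base = 786_432L;  // inferred from this run's numbers, not from hbase-site.xml
            double[] jitterRates = {
                -0.041859760880470276,  // master:store        -> 753513
                 0.05571801960468292,   // hbase:meta bootstrap -> 830250
                -0.11972410976886749    // hbase:meta open      -> 692278
            };
            for (double jitterRate : jitterRates) {
                long desired = base + (long) (base * jitterRate);  // cast truncates toward zero
                System.out.println(desired);
            }
        }
    }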
2024-12-09T05:12:46,192 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:12:46,194 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:12:46,197 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:12:46,198 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:12:46,200 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:12:46,201 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:12:46,205 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:12:46,206 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:12:46,207 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:12:46,209 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:12:46,219 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:12:46,220 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:12:46,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:12:46,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:12:46,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T05:12:46,225 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=41a709354867,33145,1733721164634, sessionid=0x100753140d70000, setting cluster-up flag (Was=false) 2024-12-09T05:12:46,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,249 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:12:46,251 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,33145,1733721164634 2024-12-09T05:12:46,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,262 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:12:46,263 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,33145,1733721164634 2024-12-09T05:12:46,353 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T05:12:46,354 DEBUG [RS:0;41a709354867:41921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;41a709354867:41921 2024-12-09T05:12:46,356 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1008): ClusterId : 3448f36c-9c91-47e2-90b8-248ec102328d 2024-12-09T05:12:46,359 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T05:12:46,360 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:12:46,362 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T05:12:46,366 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:12:46,366 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:12:46,369 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:12:46,370 DEBUG [RS:0;41a709354867:41921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cee3d7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:12:46,369 DEBUG [master/41a709354867:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 41a709354867,33145,1733721164634 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:12:46,372 DEBUG [RS:0;41a709354867:41921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad4d39a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:12:46,374 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:12:46,374 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:12:46,374 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:12:46,375 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:12:46,375 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/41a709354867:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:12:46,375 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,375 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:12:46,376 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,377 INFO [RS:0;41a709354867:41921 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:12:46,377 INFO [RS:0;41a709354867:41921 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 
2024-12-09T05:12:46,377 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-09T05:12:46,380 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733721196380 2024-12-09T05:12:46,380 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,33145,1733721164634 with isa=41a709354867/172.17.0.2:41921, startcode=1733721165481 2024-12-09T05:12:46,382 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:12:46,382 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:12:46,383 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:12:46,383 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:12:46,387 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:12:46,387 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:12:46,388 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:12:46,388 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:12:46,388 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,389 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:12:46,391 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,394 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:12:46,396 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:12:46,396 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:12:46,397 DEBUG [RS:0;41a709354867:41921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:12:46,399 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:12:46,399 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:12:46,404 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721166401,5,FailOnTimeoutGroup] 2024-12-09T05:12:46,404 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721166404,5,FailOnTimeoutGroup] 2024-12-09T05:12:46,404 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,405 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:12:46,407 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,407 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
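The HMaster(1680) message above likewise spells out its own enablement rule: reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A sketch under that assumption (the threshold 4 is illustrative, not from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableStoreFileRefCountRecovery {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per the log message above, a threshold > 0 enables the recovery feature.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 4);  // illustrative value
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", -1));
        }
    }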
2024-12-09T05:12:46,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:12:46,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:12:46,424 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T05:12:46,424 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f 2024-12-09T05:12:46,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:12:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:12:46,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:46,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:12:46,449 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:12:46,449 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:46,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:12:46,454 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:12:46,454 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:46,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:12:46,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:12:46,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:46,459 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:46,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740 2024-12-09T05:12:46,462 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740 2024-12-09T05:12:46,468 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:12:46,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:12:46,475 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:12:46,476 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830250, jitterRate=0.05571801960468292}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:12:46,478 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:12:46,478 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:12:46,478 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:12:46,478 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:12:46,478 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:12:46,478 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:12:46,479 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:12:46,479 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:12:46,482 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41171, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:12:46,483 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:12:46,483 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T05:12:46,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:12:46,489 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33145 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,41921,1733721165481 
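The hbase:meta descriptor written out above lists each family's versions, blocksize, bloom filter, in-memory flag and data block encoding. Purely as an illustration of what those attributes correspond to, and not the code path the master itself runs, the 'info' family could be expressed with the HBase 2.x client builder API along these lines:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the descriptor above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .build();
            System.out.println(info);
        }
    }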
2024-12-09T05:12:46,491 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33145 {}] master.ServerManager(486): Registering regionserver=41a709354867,41921,1733721165481 2024-12-09T05:12:46,497 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:12:46,499 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:12:46,506 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f 2024-12-09T05:12:46,506 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33833 2024-12-09T05:12:46,506 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:12:46,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:12:46,511 DEBUG [RS:0;41a709354867:41921 {}] zookeeper.ZKUtil(111): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,41921,1733721165481 2024-12-09T05:12:46,511 WARN [RS:0;41a709354867:41921 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:12:46,512 INFO [RS:0;41a709354867:41921 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:12:46,512 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481 2024-12-09T05:12:46,514 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,41921,1733721165481] 2024-12-09T05:12:46,526 DEBUG [RS:0;41a709354867:41921 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:12:46,537 INFO [RS:0;41a709354867:41921 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:12:46,549 INFO [RS:0;41a709354867:41921 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:12:46,553 INFO [RS:0;41a709354867:41921 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:12:46,553 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T05:12:46,554 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:12:46,562 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,562 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,563 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,563 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,563 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,563 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,563 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:12:46,564 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,564 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,564 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,564 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,565 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:12:46,565 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:12:46,565 DEBUG [RS:0;41a709354867:41921 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:12:46,566 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,566 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,566 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T05:12:46,567 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,567 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,41921,1733721165481-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:12:46,593 INFO [RS:0;41a709354867:41921 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:12:46,595 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,41921,1733721165481-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:46,620 INFO [RS:0;41a709354867:41921 {}] regionserver.Replication(204): 41a709354867,41921,1733721165481 started 2024-12-09T05:12:46,620 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,41921,1733721165481, RpcServer on 41a709354867/172.17.0.2:41921, sessionid=0x100753140d70001 2024-12-09T05:12:46,621 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:12:46,621 DEBUG [RS:0;41a709354867:41921 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,41921,1733721165481 2024-12-09T05:12:46,622 DEBUG [RS:0;41a709354867:41921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,41921,1733721165481' 2024-12-09T05:12:46,622 DEBUG [RS:0;41a709354867:41921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:12:46,623 DEBUG [RS:0;41a709354867:41921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:12:46,623 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:12:46,623 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:12:46,623 DEBUG [RS:0;41a709354867:41921 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,41921,1733721165481 2024-12-09T05:12:46,624 DEBUG [RS:0;41a709354867:41921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,41921,1733721165481' 2024-12-09T05:12:46,624 DEBUG [RS:0;41a709354867:41921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:12:46,624 DEBUG [RS:0;41a709354867:41921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:12:46,625 DEBUG [RS:0;41a709354867:41921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:12:46,625 INFO [RS:0;41a709354867:41921 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:12:46,625 INFO [RS:0;41a709354867:41921 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:12:46,650 WARN [41a709354867:33145 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-09T05:12:46,734 INFO [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C41921%2C1733721165481, suffix=, logDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481, archiveDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs, maxLogs=32 2024-12-09T05:12:46,737 INFO [RS:0;41a709354867:41921 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721166736 2024-12-09T05:12:46,746 INFO [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721166736 2024-12-09T05:12:46,746 DEBUG [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:12:46,902 DEBUG [41a709354867:33145 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T05:12:46,906 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=41a709354867,41921,1733721165481 2024-12-09T05:12:46,911 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,41921,1733721165481, state=OPENING 2024-12-09T05:12:46,917 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:12:46,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:46,920 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:12:46,920 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:12:46,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=41a709354867,41921,1733721165481}] 2024-12-09T05:12:47,096 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,41921,1733721165481 2024-12-09T05:12:47,098 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:12:47,102 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:12:47,113 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T05:12:47,114 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:12:47,119 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C41921%2C1733721165481.meta, suffix=.meta, logDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481, archiveDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs, maxLogs=32 2024-12-09T05:12:47,122 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.meta.1733721167121.meta 2024-12-09T05:12:47,135 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.meta.1733721167121.meta 2024-12-09T05:12:47,135 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:12:47,135 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:12:47,137 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:12:47,196 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:12:47,201 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
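The coprocessor lines above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor with priority 536870911. For an ordinary user table, attaching a coprocessor class to a descriptor can be sketched with the same builder API (the table name TestCoprocessorTable is hypothetical; only the endpoint class name comes from this log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
        public static void main(String[] args) throws Exception {
            // Attach the endpoint named in the log above to a hypothetical table descriptor.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestCoprocessorTable"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
            System.out.println(td.getCoprocessorDescriptors());
        }
    }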
2024-12-09T05:12:47,205 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:12:47,206 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:47,206 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T05:12:47,206 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T05:12:47,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:12:47,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:12:47,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:47,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:47,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:12:47,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:12:47,214 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:47,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:47,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:12:47,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:12:47,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:47,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:12:47,219 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740 2024-12-09T05:12:47,221 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740 2024-12-09T05:12:47,223 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T05:12:47,226 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:12:47,228 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692278, jitterRate=-0.11972410976886749}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:12:47,229 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:12:47,236 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733721167091 2024-12-09T05:12:47,248 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:12:47,248 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T05:12:47,249 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,41921,1733721165481 2024-12-09T05:12:47,251 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,41921,1733721165481, state=OPEN 2024-12-09T05:12:47,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:12:47,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:12:47,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:12:47,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:12:47,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:12:47,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=41a709354867,41921,1733721165481 in 335 msec 2024-12-09T05:12:47,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:12:47,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 774 msec 2024-12-09T05:12:47,272 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 970 msec 2024-12-09T05:12:47,276 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733721167275, completionTime=-1 2024-12-09T05:12:47,276 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T05:12:47,276 DEBUG [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T05:12:47,322 DEBUG [hconnection-0xdb679d6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:12:47,324 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:12:47,335 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T05:12:47,335 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733721227335 2024-12-09T05:12:47,336 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733721287336 2024-12-09T05:12:47,336 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 59 msec 2024-12-09T05:12:47,362 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,33145,1733721164634-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:47,363 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,33145,1733721164634-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:47,363 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,33145,1733721164634-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:47,365 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-41a709354867:33145, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:47,365 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:12:47,373 DEBUG [master/41a709354867:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T05:12:47,376 INFO [master/41a709354867:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-09T05:12:47,377 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:12:47,384 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T05:12:47,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:12:47,388 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:47,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:12:47,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:12:47,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:12:47,410 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d537f55d10ac7a1ac262a45953418c2d, NAME => 'hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f 2024-12-09T05:12:47,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:12:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:12:47,422 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:47,422 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d537f55d10ac7a1ac262a45953418c2d, disabling compactions & flushes 2024-12-09T05:12:47,423 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 
2024-12-09T05:12:47,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:12:47,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. after waiting 0 ms 2024-12-09T05:12:47,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:12:47,423 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:12:47,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d537f55d10ac7a1ac262a45953418c2d: 2024-12-09T05:12:47,425 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:12:47,432 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733721167426"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721167426"}]},"ts":"1733721167426"} 2024-12-09T05:12:47,463 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T05:12:47,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:12:47,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721167466"}]},"ts":"1733721167466"} 2024-12-09T05:12:47,477 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T05:12:47,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d537f55d10ac7a1ac262a45953418c2d, ASSIGN}] 2024-12-09T05:12:47,489 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d537f55d10ac7a1ac262a45953418c2d, ASSIGN 2024-12-09T05:12:47,491 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d537f55d10ac7a1ac262a45953418c2d, ASSIGN; state=OFFLINE, location=41a709354867,41921,1733721165481; forceNewPlan=false, retain=false 2024-12-09T05:12:47,642 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d537f55d10ac7a1ac262a45953418c2d, regionState=OPENING, regionLocation=41a709354867,41921,1733721165481 2024-12-09T05:12:47,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; 
OpenRegionProcedure d537f55d10ac7a1ac262a45953418c2d, server=41a709354867,41921,1733721165481}] 2024-12-09T05:12:47,801 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,41921,1733721165481 2024-12-09T05:12:47,809 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:12:47,810 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d537f55d10ac7a1ac262a45953418c2d, NAME => 'hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:12:47,810 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,811 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:47,811 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,811 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,814 INFO [StoreOpener-d537f55d10ac7a1ac262a45953418c2d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,817 INFO [StoreOpener-d537f55d10ac7a1ac262a45953418c2d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d537f55d10ac7a1ac262a45953418c2d columnFamilyName info 2024-12-09T05:12:47,817 DEBUG [StoreOpener-d537f55d10ac7a1ac262a45953418c2d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:47,818 INFO [StoreOpener-d537f55d10ac7a1ac262a45953418c2d-1 {}] regionserver.HStore(327): Store=d537f55d10ac7a1ac262a45953418c2d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-12-09T05:12:47,820 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,821 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,825 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:12:47,829 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:12:47,830 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d537f55d10ac7a1ac262a45953418c2d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718413, jitterRate=-0.08649145066738129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:12:47,832 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d537f55d10ac7a1ac262a45953418c2d: 2024-12-09T05:12:47,834 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d., pid=6, masterSystemTime=1733721167801 2024-12-09T05:12:47,838 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:12:47,838 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 
2024-12-09T05:12:47,839 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d537f55d10ac7a1ac262a45953418c2d, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,41921,1733721165481 2024-12-09T05:12:47,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:12:47,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d537f55d10ac7a1ac262a45953418c2d, server=41a709354867,41921,1733721165481 in 195 msec 2024-12-09T05:12:47,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:12:47,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d537f55d10ac7a1ac262a45953418c2d, ASSIGN in 363 msec 2024-12-09T05:12:47,852 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:12:47,852 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721167852"}]},"ts":"1733721167852"} 2024-12-09T05:12:47,855 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T05:12:47,859 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:12:47,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 481 msec 2024-12-09T05:12:47,887 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T05:12:47,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:12:47,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:47,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:12:47,921 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T05:12:47,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:12:47,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-12-09T05:12:47,959 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T05:12:47,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:12:47,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 24 msec 2024-12-09T05:12:47,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T05:12:48,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T05:12:48,003 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.427sec 2024-12-09T05:12:48,005 INFO [master/41a709354867:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:12:48,007 INFO [master/41a709354867:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:12:48,008 INFO [master/41a709354867:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:12:48,009 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:12:48,009 INFO [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:12:48,010 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,33145,1733721164634-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:12:48,011 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,33145,1733721164634-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:12:48,018 DEBUG [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:12:48,019 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:12:48,019 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,33145,1733721164634-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:12:48,057 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2056b3cd to 127.0.0.1:52383 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6410c843 2024-12-09T05:12:48,057 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-09T05:12:48,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@671fded9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:12:48,069 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T05:12:48,070 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T05:12:48,080 DEBUG [hconnection-0x6095e008-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:12:48,149 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:12:48,162 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=41a709354867,33145,1733721164634 2024-12-09T05:12:48,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:12:48,191 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-09T05:12:48,202 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:12:48,209 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:12:48,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T05:12:48,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T05:12:48,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:12:48,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-09T05:12:48,229 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:12:48,230 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:48,232 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:12:48,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9 2024-12-09T05:12:48,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:12:48,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741837_1013 (size=389) 2024-12-09T05:12:48,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741837_1013 (size=389) 2024-12-09T05:12:48,282 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a8ea2e19ac6213389de31b5fc040ab59, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f 2024-12-09T05:12:48,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741838_1014 (size=72) 2024-12-09T05:12:48,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741838_1014 (size=72) 2024-12-09T05:12:48,330 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 
{}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:48,330 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing a8ea2e19ac6213389de31b5fc040ab59, disabling compactions & flushes 2024-12-09T05:12:48,330 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:12:48,330 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:12:48,330 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. after waiting 0 ms 2024-12-09T05:12:48,330 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:12:48,331 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:12:48,331 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:12:48,334 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:12:48,335 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733721168334"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721168334"}]},"ts":"1733721168334"} 2024-12-09T05:12:48,341 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-09T05:12:48,344 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:12:48,345 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721168345"}]},"ts":"1733721168345"} 2024-12-09T05:12:48,352 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-09T05:12:48,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a8ea2e19ac6213389de31b5fc040ab59, ASSIGN}] 2024-12-09T05:12:48,366 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a8ea2e19ac6213389de31b5fc040ab59, ASSIGN 2024-12-09T05:12:48,369 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a8ea2e19ac6213389de31b5fc040ab59, ASSIGN; state=OFFLINE, location=41a709354867,41921,1733721165481; forceNewPlan=false, retain=false 2024-12-09T05:12:48,520 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=a8ea2e19ac6213389de31b5fc040ab59, regionState=OPENING, regionLocation=41a709354867,41921,1733721165481 2024-12-09T05:12:48,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure a8ea2e19ac6213389de31b5fc040ab59, server=41a709354867,41921,1733721165481}] 2024-12-09T05:12:48,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 41a709354867,41921,1733721165481 2024-12-09T05:12:48,684 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 
2024-12-09T05:12:48,685 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => a8ea2e19ac6213389de31b5fc040ab59, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:12:48,685 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,685 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:12:48,685 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,685 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,688 INFO [StoreOpener-a8ea2e19ac6213389de31b5fc040ab59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,690 INFO [StoreOpener-a8ea2e19ac6213389de31b5fc040ab59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a8ea2e19ac6213389de31b5fc040ab59 columnFamilyName info 2024-12-09T05:12:48,690 DEBUG [StoreOpener-a8ea2e19ac6213389de31b5fc040ab59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:12:48,691 INFO [StoreOpener-a8ea2e19ac6213389de31b5fc040ab59-1 {}] regionserver.HStore(327): Store=a8ea2e19ac6213389de31b5fc040ab59/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:12:48,693 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,693 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,697 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:12:48,700 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:12:48,701 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened a8ea2e19ac6213389de31b5fc040ab59; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846954, jitterRate=0.07695847749710083}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:12:48,702 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:12:48,704 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59., pid=11, masterSystemTime=1733721168677 2024-12-09T05:12:48,707 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:12:48,707 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 
2024-12-09T05:12:48,708 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=a8ea2e19ac6213389de31b5fc040ab59, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,41921,1733721165481 2024-12-09T05:12:48,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T05:12:48,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure a8ea2e19ac6213389de31b5fc040ab59, server=41a709354867,41921,1733721165481 in 188 msec 2024-12-09T05:12:48,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T05:12:48,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a8ea2e19ac6213389de31b5fc040ab59, ASSIGN in 355 msec 2024-12-09T05:12:48,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:12:48,719 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721168719"}]},"ts":"1733721168719"} 2024-12-09T05:12:48,722 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-09T05:12:48,726 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:12:48,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 502 msec 2024-12-09T05:12:52,710 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T05:12:52,751 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T05:12:52,753 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-09T05:12:52,754 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-09T05:12:55,189 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T05:12:55,189 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T05:12:55,191 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T05:12:55,191 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T05:12:55,193 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-09T05:12:55,193 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-09T05:12:55,195 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T05:12:55,195 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T05:12:55,195 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T05:12:55,195 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T05:12:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33145 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:12:58,249 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed 2024-12-09T05:12:58,253 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-09T05:12:58,254 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 
2024-12-09T05:12:58,255 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721178254 2024-12-09T05:12:58,265 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721166736 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721178254 2024-12-09T05:12:58,266 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:12:58,266 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721166736 is not closed yet, will try archiving it next time 2024-12-09T05:12:58,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741833_1009 (size=955) 2024-12-09T05:12:58,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741833_1009 (size=955) 2024-12-09T05:13:10,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41921 {}] regionserver.HRegion(8581): Flush requested on a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:13:10,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8ea2e19ac6213389de31b5fc040ab59 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:13:10,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/697c545300894470b2a8f874b5664f99 is 1080, key is row0001/info:/1733721178272/Put/seqid=0 2024-12-09T05:13:10,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741840_1016 (size=12509) 2024-12-09T05:13:10,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741840_1016 (size=12509) 2024-12-09T05:13:10,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/697c545300894470b2a8f874b5664f99 2024-12-09T05:13:10,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/697c545300894470b2a8f874b5664f99 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99 2024-12-09T05:13:10,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T05:13:10,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a8ea2e19ac6213389de31b5fc040ab59 in 150ms, sequenceid=11, compaction requested=false 2024-12-09T05:13:10,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:13:13,680 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T05:13:18,307 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721198306 2024-12-09T05:13:18,374 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:13:18,376 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51152, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:13:18,515 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:18,517 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721178254 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721198306 2024-12-09T05:13:18,517 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:13:18,517 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721178254 is not closed yet, will try archiving it next time 2024-12-09T05:13:18,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741839_1015 (size=12399) 2024-12-09T05:13:18,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741839_1015 (size=12399) 2024-12-09T05:13:18,720 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:20,924 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:23,127 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:25,331 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41921 {}] regionserver.HRegion(8581): Flush requested on a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:13:25,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8ea2e19ac6213389de31b5fc040ab59 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:13:25,533 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:25,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/0df56eeb0d0c40daaf7057a982774a1e is 1080, key is row0008/info:/1733721192297/Put/seqid=0 2024-12-09T05:13:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741842_1018 (size=12509) 2024-12-09T05:13:25,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741842_1018 (size=12509) 2024-12-09T05:13:25,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/0df56eeb0d0c40daaf7057a982774a1e 2024-12-09T05:13:25,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/0df56eeb0d0c40daaf7057a982774a1e as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/0df56eeb0d0c40daaf7057a982774a1e 2024-12-09T05:13:25,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/0df56eeb0d0c40daaf7057a982774a1e, entries=7, sequenceid=21, filesize=12.2 K 2024-12-09T05:13:25,789 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 
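The flush entries above follow a two-step pattern: the new HFile (0df56eeb0d0c40daaf7057a982774a1e) is first written under the region's .tmp directory and only then committed into the info store directory, as the "Committing ... .tmp/info/... as ... info/..." DEBUG line shows. Below is a minimal sketch of that commit step, assuming the Hadoop FileSystem API and caller-supplied paths; it is illustrative only, not the actual HRegionFileSystem code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {
  // Move a flushed HFile from the region's .tmp area into its store directory.
  // Doing the write under .tmp and then renaming means readers never see a
  // partially written store file.
  static Path commit(FileSystem fs, Path tmpHFile, Path storeDir) throws java.io.IOException {
    Path dest = new Path(storeDir, tmpHFile.getName());
    if (!fs.rename(tmpHFile, dest)) {
      throw new java.io.IOException("Failed to commit " + tmpHFile + " as " + dest);
    }
    return dest;
  }

  public static void main(String[] args) throws Exception {
    // args[0] = flushed .tmp file, args[1] = store directory (hypothetical example paths)
    FileSystem fs = FileSystem.get(new Configuration()); // local FS unless HDFS is configured
    System.out.println(commit(fs, new Path(args[0]), new Path(args[1])));
  }
}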
2024-12-09T05:13:25,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a8ea2e19ac6213389de31b5fc040ab59 in 458ms, sequenceid=21, compaction requested=false 2024-12-09T05:13:25,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:13:25,790 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K 2024-12-09T05:13:25,790 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:13:25,791 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99 because midkey is the same as first or last row 2024-12-09T05:13:27,535 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:28,020 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T05:13:28,020 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T05:13:29,738 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:29,740 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41921%2C1733721165481:(num 1733721198306) roll requested 2024-12-09T05:13:29,740 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:29,740 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721209740 2024-12-09T05:13:29,975 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 232 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:30,175 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:30,176 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721198306 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721209740 2024-12-09T05:13:30,176 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35487:35487),(127.0.0.1/127.0.0.1:42655:42655)] 2024-12-09T05:13:30,176 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721198306 is not closed yet, will try archiving it next time 2024-12-09T05:13:30,177 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721178254 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721178254 2024-12-09T05:13:30,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741841_1017 (size=7739) 2024-12-09T05:13:30,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741841_1017 (size=7739) 2024-12-09T05:13:31,942 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:33,686 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region a8ea2e19ac6213389de31b5fc040ab59, had cached 0 bytes from a total of 25018 2024-12-09T05:13:34,146 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:36,349 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:38,552 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:40,555 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:13:40,555 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721220555 2024-12-09T05:13:43,681 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
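The WARN above requests a roll after a run of moderately slow syncs (count=7, threshold=5), while the WARNs that follow request one whenever a single sync exceeds the 5000 ms threshold. The sketch below shows that dual-threshold accounting in isolation; field names, the reset-on-fast-sync behaviour, and the constructor defaults are assumptions taken from the numbers in these log lines, not HBase's AbstractFSWAL code.

public class SlowSyncRollTracker {
  private final long slowSyncMs;        // a sync slower than this counts as "slow" (the ~200 ms INFO lines)
  private final long rollOnSyncMs;      // one sync slower than this forces a roll (5000 ms in the WARN lines)
  private final int slowSyncCountLimit; // how many slow syncs in a row before a roll (5 in the WARN above)
  private int slowSyncCount;

  public SlowSyncRollTracker(long slowSyncMs, long rollOnSyncMs, int slowSyncCountLimit) {
    this.slowSyncMs = slowSyncMs;
    this.rollOnSyncMs = rollOnSyncMs;
    this.slowSyncCountLimit = slowSyncCountLimit;
  }

  /** Returns true if this sync duration should trigger a log-roll request. */
  public boolean onSyncCompleted(long durationMs) {
    if (durationMs >= rollOnSyncMs) {
      return true;                       // a single catastrophically slow sync is enough
    }
    if (durationMs >= slowSyncMs) {
      slowSyncCount++;                   // accumulate moderately slow syncs
    } else {
      slowSyncCount = 0;                 // assumption: a fast sync resets the streak
    }
    return slowSyncCount > slowSyncCountLimit;
  }

  public static void main(String[] args) {
    SlowSyncRollTracker tracker = new SlowSyncRollTracker(100, 5000, 5);
    boolean roll = false;
    for (int i = 0; i < 7 && !roll; i++) {
      roll = tracker.onSyncCompleted(201);          // a run of ~200 ms syncs, as in the log
    }
    System.out.println("roll after repeated slow syncs: " + roll);      // true
    System.out.println("roll after one 5006 ms sync: "
        + new SlowSyncRollTracker(100, 5000, 5).onSyncCompleted(5006)); // true
  }
}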
2024-12-09T05:13:45,565 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:45,565 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:45,566 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41921%2C1733721165481:(num 1733721220555) roll requested 2024-12-09T05:13:47,367 DEBUG [master/41a709354867:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d537f55d10ac7a1ac262a45953418c2d changed from -1.0 to 0.0, refreshing cache 2024-12-09T05:13:50,566 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:50,566 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:13:50,567 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721209740 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721220555 2024-12-09T05:13:50,567 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:13:50,567 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721209740 is not closed yet, will try archiving it next time 2024-12-09T05:13:50,568 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721230567 2024-12-09T05:13:50,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741843_1019 (size=4753) 2024-12-09T05:13:50,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741843_1019 (size=4753) 2024-12-09T05:13:55,570 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41921 {}] regionserver.HRegion(8581): Flush requested on 
a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:13:55,570 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:55,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8ea2e19ac6213389de31b5fc040ab59 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:13:55,576 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:55,576 WARN [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:13:57,571 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:14:00,572 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:14:00,572 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:14:00,576 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:14:00,577 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK], DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK]] 2024-12-09T05:14:00,578 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721220555 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721230567 2024-12-09T05:14:00,578 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35487:35487),(127.0.0.1/127.0.0.1:42655:42655)] 2024-12-09T05:14:00,578 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): 
hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721220555 is not closed yet, will try archiving it next time 2024-12-09T05:14:00,578 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41921%2C1733721165481:(num 1733721230567) roll requested 2024-12-09T05:14:00,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/b47660d4e7194f89b6984f6a06808ebe is 1080, key is row0015/info:/1733721207334/Put/seqid=0 2024-12-09T05:14:00,578 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721240578 2024-12-09T05:14:00,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741844_1020 (size=1569) 2024-12-09T05:14:00,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741844_1020 (size=1569) 2024-12-09T05:14:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741846_1022 (size=12509) 2024-12-09T05:14:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741846_1022 (size=12509) 2024-12-09T05:14:00,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/b47660d4e7194f89b6984f6a06808ebe 2024-12-09T05:14:00,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/b47660d4e7194f89b6984f6a06808ebe as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b47660d4e7194f89b6984f6a06808ebe 2024-12-09T05:14:00,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b47660d4e7194f89b6984f6a06808ebe, entries=7, sequenceid=31, filesize=12.2 K 2024-12-09T05:14:05,586 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:05,586 WARN [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], 
DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:05,611 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:05,611 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:05,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a8ea2e19ac6213389de31b5fc040ab59 in 10041ms, sequenceid=31, compaction requested=true 2024-12-09T05:14:05,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:14:05,612 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K 2024-12-09T05:14:05,612 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:14:05,612 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99 because midkey is the same as first or last row 2024-12-09T05:14:05,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a8ea2e19ac6213389de31b5fc040ab59:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:14:05,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:14:05,614 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:14:05,617 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:14:05,619 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.HStore(1540): a8ea2e19ac6213389de31b5fc040ab59/info is initiating minor compaction (all files) 2024-12-09T05:14:05,619 INFO [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8ea2e19ac6213389de31b5fc040ab59/info in TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 
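As a consistency check on the selection figures above: the three eligible HFiles are each 12,509 bytes (blocks blk_1073741840_1016, blk_1073741842_1018 and blk_1073741846_1022), and 3 x 12,509 = 37,527 bytes, or about 36.6 K. That matches both the "3 files of size 37527" chosen by the exploring policy and the totalSize=36.6 K reported for the minor compaction in the entries that follow, and it is the same 36.6 K that the split-policy check compares against sizeToCheck=16.0 K.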
2024-12-09T05:14:05,619 INFO [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/0df56eeb0d0c40daaf7057a982774a1e, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b47660d4e7194f89b6984f6a06808ebe] into tmpdir=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp, totalSize=36.6 K 2024-12-09T05:14:05,620 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] compactions.Compactor(224): Compacting 697c545300894470b2a8f874b5664f99, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733721178272 2024-12-09T05:14:05,621 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0df56eeb0d0c40daaf7057a982774a1e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733721192297 2024-12-09T05:14:05,621 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] compactions.Compactor(224): Compacting b47660d4e7194f89b6984f6a06808ebe, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733721207334 2024-12-09T05:14:05,645 INFO [RS:0;41a709354867:41921-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8ea2e19ac6213389de31b5fc040ab59#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:14:05,646 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/fd73cb41cb8f44398d95af4fa56cc589 is 1080, key is row0001/info:/1733721178272/Put/seqid=0 2024-12-09T05:14:05,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741848_1024 (size=27710) 2024-12-09T05:14:05,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741848_1024 (size=27710) 2024-12-09T05:14:05,662 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/fd73cb41cb8f44398d95af4fa56cc589 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/fd73cb41cb8f44398d95af4fa56cc589 2024-12-09T05:14:10,587 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:10,587 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:10,588 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721230567 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721240578 2024-12-09T05:14:10,588 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35487:35487),(127.0.0.1/127.0.0.1:42655:42655)] 2024-12-09T05:14:10,588 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721230567 is not closed yet, will try archiving it next time 2024-12-09T05:14:10,588 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721198306 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721198306 2024-12-09T05:14:10,588 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41921%2C1733721165481:(num 1733721250588) roll requested 
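The minor compaction logged above rewrites three sorted store files into a single new file (fd73cb41cb8f44398d95af4fa56cc589). The sketch below shows the generic k-way merge idea behind that rewrite using plain Java collections; it is a simplification under that assumption, not HBase's StoreScanner/Compactor machinery, and ignores versioning and delete markers.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMergeSketch {

  /** A cursor over one sorted input that remembers its current element. */
  private static final class Cursor<T> {
    T head;
    final Iterator<T> rest;
    Cursor(Iterator<T> it) { this.rest = it; this.head = it.next(); }
    boolean advance() {
      if (rest.hasNext()) { head = rest.next(); return true; }
      return false;
    }
  }

  /** Merge several individually sorted lists into one sorted list, the way a minor
   *  compaction folds several sorted store files into a single output file. */
  static <T extends Comparable<T>> List<T> merge(List<List<T>> sortedInputs) {
    Comparator<Cursor<T>> byHead = (a, b) -> a.head.compareTo(b.head);
    PriorityQueue<Cursor<T>> heap = new PriorityQueue<>(byHead);
    for (List<T> input : sortedInputs) {
      if (!input.isEmpty()) {
        heap.add(new Cursor<>(input.iterator()));
      }
    }
    List<T> merged = new ArrayList<>();
    while (!heap.isEmpty()) {
      Cursor<T> smallest = heap.poll();
      merged.add(smallest.head);
      if (smallest.advance()) {
        heap.add(smallest);              // re-insert with its new head element
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    // Hypothetical row keys, just to show the merge ordering.
    System.out.println(merge(List.of(
        List.of("row0001", "row0004"),
        List.of("row0002", "row0005"),
        List.of("row0003"))));           // [row0001, row0002, row0003, row0004, row0005]
  }
}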
2024-12-09T05:14:10,588 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721250588 2024-12-09T05:14:10,591 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721209740 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721209740 2024-12-09T05:14:10,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741845_1021 (size=438) 2024-12-09T05:14:10,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741845_1021 (size=438) 2024-12-09T05:14:10,593 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721220555 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721220555 2024-12-09T05:14:10,992 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721230567 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721230567 2024-12-09T05:14:13,681 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T05:14:15,589 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:15,589 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:15,591 INFO [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8ea2e19ac6213389de31b5fc040ab59/info of a8ea2e19ac6213389de31b5fc040ab59 into fd73cb41cb8f44398d95af4fa56cc589(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute. 
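The recurring FsDatasetAsyncDiskServiceFixer DEBUG line above is benign: per HBASE-27595, the test fixer evidently looks up a private "threadGroup" field by reflection, and on newer Hadoop versions the field no longer exists, so the lookup fails with NoSuchFieldException and is only logged. The snippet below sketches that lookup-may-miss pattern in general; it is an assumption-labelled illustration, not the HBaseTestingUtility code.

import java.lang.reflect.Field;

public class OptionalFieldLookupSketch {
  /** Returns the named declared field if present, or null when this library
   *  version no longer has it (the situation behind the DEBUG line above). */
  static Field findDeclaredField(Class<?> clazz, String name) {
    try {
      return clazz.getDeclaredField(name);
    } catch (NoSuchFieldException e) {
      return null; // field absent in this version; the caller just logs it and moves on
    }
  }

  public static void main(String[] args) {
    // Thread keeps its group in a field with a different name, so this lookup
    // misses and prints null, analogous to the DEBUG message in the log.
    System.out.println(findDeclaredField(Thread.class, "threadGroup"));
  }
}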
2024-12-09T05:14:15,591 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:14:15,591 INFO [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59., storeName=a8ea2e19ac6213389de31b5fc040ab59/info, priority=13, startTime=1733721245613; duration=9sec 2024-12-09T05:14:15,591 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K 2024-12-09T05:14:15,591 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:14:15,591 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/fd73cb41cb8f44398d95af4fa56cc589 because midkey is the same as first or last row 2024-12-09T05:14:15,592 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:14:15,592 DEBUG [RS:0;41a709354867:41921-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8ea2e19ac6213389de31b5fc040ab59:info 2024-12-09T05:14:15,600 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:15,600 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42421,DS-cdec4faf-a1a2-4c41-8b42-287093eaef39,DISK], DatanodeInfoWithStorage[127.0.0.1:33923,DS-ae1ca24f-2aa1-4963-9df8-d13ed163e148,DISK]] 2024-12-09T05:14:15,600 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721240578 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721250588 2024-12-09T05:14:15,600 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:14:15,600 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721240578 is not closed yet, will try archiving it next time 2024-12-09T05:14:15,601 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721255600 2024-12-09T05:14:15,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741847_1023 (size=539) 
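After the compaction completes, the split-policy DEBUG lines above run the same two checks seen earlier: the info store (now 27.1 K) is larger than sizeToCheck=16.0 K, yet the region is still not split because the candidate midkey equals the first or last row of the largest store file. The condensed check below uses byte[] row keys and made-up example values; it is illustrative only, not the ConstantSizeRegionSplitPolicy or StoreUtils source.

import java.util.Arrays;

public class SplitCheckSketch {
  /** Mirrors the two DEBUG checks above: size threshold first, then a usable midkey. */
  static boolean canSplit(long storeSizeBytes, long sizeToCheckBytes,
                          byte[] midKey, byte[] firstKey, byte[] lastKey) {
    if (storeSizeBytes <= sizeToCheckBytes) {
      return false;                                   // not big enough to consider splitting
    }
    if (midKey == null
        || Arrays.equals(midKey, firstKey)
        || Arrays.equals(midKey, lastKey)) {
      return false;                                   // "midkey is the same as first or last row"
    }
    return true;
  }

  public static void main(String[] args) {
    byte[] first = "row0001".getBytes();
    byte[] last = "row0021".getBytes();
    // Example values only: a 27.1 K store against a 16.0 K threshold.
    System.out.println(canSplit(27_710, 16_384, first, first, last));                  // false, as in the log
    System.out.println(canSplit(27_710, 16_384, "row0011".getBytes(), first, last));   // true
  }
}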
2024-12-09T05:14:15,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741847_1023 (size=539) 2024-12-09T05:14:15,604 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721240578 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721240578 2024-12-09T05:14:15,610 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721250588 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721255600 2024-12-09T05:14:15,610 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:14:15,610 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721250588 is not closed yet, will try archiving it next time 2024-12-09T05:14:15,610 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41921%2C1733721165481:(num 1733721255600) roll requested 2024-12-09T05:14:15,611 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41921%2C1733721165481.1733721255610 2024-12-09T05:14:15,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741849_1025 (size=1258) 2024-12-09T05:14:15,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741849_1025 (size=1258) 2024-12-09T05:14:15,622 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721255600 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721255610 2024-12-09T05:14:15,622 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655),(127.0.0.1/127.0.0.1:35487:35487)] 2024-12-09T05:14:15,622 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721255600 is not closed yet, will try archiving it next time 2024-12-09T05:14:15,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741850_1026 (size=93) 2024-12-09T05:14:15,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added 
to blk_1073741850_1026 (size=93) 2024-12-09T05:14:15,624 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481/41a709354867%2C41921%2C1733721165481.1733721255600 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs/41a709354867%2C41921%2C1733721165481.1733721255600 2024-12-09T05:14:18,686 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region a8ea2e19ac6213389de31b5fc040ab59, had cached 0 bytes from a total of 27710 2024-12-09T05:14:27,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41921 {}] regionserver.HRegion(8581): Flush requested on a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:14:27,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8ea2e19ac6213389de31b5fc040ab59 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:14:27,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/b7ba929fad76454ba1a1a77ef971721b is 1080, key is row0022/info:/1733721255602/Put/seqid=0 2024-12-09T05:14:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741852_1028 (size=12509) 2024-12-09T05:14:27,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741852_1028 (size=12509) 2024-12-09T05:14:27,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/b7ba929fad76454ba1a1a77ef971721b 2024-12-09T05:14:27,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/b7ba929fad76454ba1a1a77ef971721b as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b7ba929fad76454ba1a1a77ef971721b 2024-12-09T05:14:27,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b7ba929fad76454ba1a1a77ef971721b, entries=7, sequenceid=42, filesize=12.2 K 2024-12-09T05:14:27,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a8ea2e19ac6213389de31b5fc040ab59 in 34ms, sequenceid=42, compaction requested=false 2024-12-09T05:14:27,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:14:27,653 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K 2024-12-09T05:14:27,653 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:14:27,654 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/fd73cb41cb8f44398d95af4fa56cc589 because midkey is the same as first or last row 2024-12-09T05:14:35,628 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T05:14:35,628 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T05:14:35,628 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2056b3cd to 127.0.0.1:52383 2024-12-09T05:14:35,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:14:35,629 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:14:35,629 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=901919515, stopped=false 2024-12-09T05:14:35,629 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=41a709354867,33145,1733721164634 2024-12-09T05:14:35,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:14:35,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:14:35,633 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T05:14:35,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:35,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:35,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:14:35,633 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,41921,1733721165481' ***** 2024-12-09T05:14:35,633 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:14:35,633 INFO [RS:0;41a709354867:41921 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:14:35,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:14:35,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:14:35,634 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:14:35,634 INFO [RS:0;41a709354867:41921 {}] 
flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:14:35,634 INFO [RS:0;41a709354867:41921 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:14:35,634 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(3579): Received CLOSE for a8ea2e19ac6213389de31b5fc040ab59 2024-12-09T05:14:35,634 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(3579): Received CLOSE for d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:14:35,634 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,41921,1733721165481 2024-12-09T05:14:35,635 DEBUG [RS:0;41a709354867:41921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:14:35,635 INFO [RS:0;41a709354867:41921 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:14:35,635 INFO [RS:0;41a709354867:41921 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:14:35,635 INFO [RS:0;41a709354867:41921 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:14:35,635 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing a8ea2e19ac6213389de31b5fc040ab59, disabling compactions & flushes 2024-12-09T05:14:35,635 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:14:35,635 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. after waiting 0 ms 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 
2024-12-09T05:14:35,635 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1603): Online Regions={a8ea2e19ac6213389de31b5fc040ab59=TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59., d537f55d10ac7a1ac262a45953418c2d=hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:14:35,635 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing a8ea2e19ac6213389de31b5fc040ab59 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-09T05:14:35,635 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:14:35,635 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:14:35,636 DEBUG [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, a8ea2e19ac6213389de31b5fc040ab59, d537f55d10ac7a1ac262a45953418c2d 2024-12-09T05:14:35,636 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB 2024-12-09T05:14:35,641 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/d794289c3dce47a89c2f538f81dc6ae7 is 1080, key is row0029/info:/1733721269621/Put/seqid=0 2024-12-09T05:14:35,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741853_1029 (size=8193) 2024-12-09T05:14:35,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741853_1029 (size=8193) 2024-12-09T05:14:35,652 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/d794289c3dce47a89c2f538f81dc6ae7 2024-12-09T05:14:35,658 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/.tmp/info/01908e210a6b4c98bf08b08ace21e521 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59./info:regioninfo/1733721168708/Put/seqid=0 2024-12-09T05:14:35,662 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/.tmp/info/d794289c3dce47a89c2f538f81dc6ae7 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/d794289c3dce47a89c2f538f81dc6ae7 2024-12-09T05:14:35,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741854_1030 (size=8172) 2024-12-09T05:14:35,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741854_1030 (size=8172) 2024-12-09T05:14:35,665 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/.tmp/info/01908e210a6b4c98bf08b08ace21e521 2024-12-09T05:14:35,675 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/d794289c3dce47a89c2f538f81dc6ae7, entries=3, sequenceid=48, filesize=8.0 K 2024-12-09T05:14:35,677 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a8ea2e19ac6213389de31b5fc040ab59 in 42ms, sequenceid=48, compaction requested=true 2024-12-09T05:14:35,677 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/0df56eeb0d0c40daaf7057a982774a1e, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b47660d4e7194f89b6984f6a06808ebe] to archive 2024-12-09T05:14:35,681 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
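The StoreCloser entry above lists the three compacted-away store files being handed to the archiver, and the HFileArchiver entries that follow show each one moved from the data/... tree into the parallel archive/data/... tree under the same cluster root rather than deleted. The sketch below shows that move under the assumption of the Hadoop FileSystem API and a caller-supplied relative path; it is not the actual HFileArchiver implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  /** Relocate an obsolete store file from <root>/data/... to <root>/archive/data/...,
   *  preserving its table/region/family layout instead of deleting it outright. */
  static Path archive(FileSystem fs, Path rootDir, String relativeStoreFile) throws java.io.IOException {
    Path src = new Path(rootDir, relativeStoreFile);                     // e.g. data/default/<table>/<region>/info/<hfile>
    Path dst = new Path(new Path(rootDir, "archive"), relativeStoreFile);
    fs.mkdirs(dst.getParent());
    if (!fs.rename(src, dst)) {
      throw new java.io.IOException("Could not archive " + src + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws Exception {
    // args[0] = cluster root dir, args[1] = relative store file path (hypothetical examples)
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(archive(fs, new Path(args[0]), args[1]));
  }
}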
2024-12-09T05:14:35,684 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99 to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/697c545300894470b2a8f874b5664f99 2024-12-09T05:14:35,686 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/0df56eeb0d0c40daaf7057a982774a1e to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/0df56eeb0d0c40daaf7057a982774a1e 2024-12-09T05:14:35,688 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b47660d4e7194f89b6984f6a06808ebe to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/info/b47660d4e7194f89b6984f6a06808ebe 2024-12-09T05:14:35,691 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/.tmp/table/3ea67fa0909049dbb5e1a5c2b41aaf33 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733721168719/Put/seqid=0 2024-12-09T05:14:35,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741855_1031 (size=5452) 2024-12-09T05:14:35,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741855_1031 (size=5452) 2024-12-09T05:14:35,702 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/.tmp/table/3ea67fa0909049dbb5e1a5c2b41aaf33 2024-12-09T05:14:35,707 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/default/TestLogRolling-testSlowSyncLogRolling/a8ea2e19ac6213389de31b5fc040ab59/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-09T05:14:35,710 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed 
TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for a8ea2e19ac6213389de31b5fc040ab59: 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733721168218.a8ea2e19ac6213389de31b5fc040ab59. 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/.tmp/info/01908e210a6b4c98bf08b08ace21e521 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/info/01908e210a6b4c98bf08b08ace21e521 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d537f55d10ac7a1ac262a45953418c2d, disabling compactions & flushes 2024-12-09T05:14:35,711 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. after waiting 0 ms 2024-12-09T05:14:35,711 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 
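The StoreCloser entries above record the shutdown-time cleanup for region a8ea2e19ac6213389de31b5fc040ab59: the three compacted HFiles listed by HStore(2316) are moved out of the region's info store into the cluster archive directory by HFileArchiver before the close completes. A minimal Python sketch for pulling those source/destination pairs out of a log like this one (it assumes only the HFileArchiver(596) message text visible above, not any HBase API, and uses finditer because this capture puts several entries on one physical line):

import re
import sys

# Matches the HFileArchiver message:
#   "Archived from FileableStoreFile, <source-path> to <destination-path>"
ARCHIVE_RE = re.compile(
    r"HFileArchiver\(\d+\): Archived from FileableStoreFile, (\S+) to (\S+)"
)

def archived_pairs(lines):
    # Yield (source, destination) HDFS paths for every archived store file.
    for line in lines:
        for m in ARCHIVE_RE.finditer(line):
            yield m.group(1), m.group(2)

if __name__ == "__main__":
    for src, dst in archived_pairs(sys.stdin):
        print(src, "->", dst)

Run over this log it would report the three 697c5453.../0df56eeb.../b47660d4... files landing under .../archive/data/default/TestLogRolling-testSlowSyncLogRolling/.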
2024-12-09T05:14:35,711 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d537f55d10ac7a1ac262a45953418c2d 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T05:14:35,719 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/info/01908e210a6b4c98bf08b08ace21e521, entries=20, sequenceid=14, filesize=8.0 K 2024-12-09T05:14:35,720 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/.tmp/table/3ea67fa0909049dbb5e1a5c2b41aaf33 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/table/3ea67fa0909049dbb5e1a5c2b41aaf33 2024-12-09T05:14:35,727 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/table/3ea67fa0909049dbb5e1a5c2b41aaf33, entries=4, sequenceid=14, filesize=5.3 K 2024-12-09T05:14:35,728 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 93ms, sequenceid=14, compaction requested=false 2024-12-09T05:14:35,729 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/.tmp/info/01526085a1294bff8a28f6a049542527 is 45, key is default/info:d/1733721167930/Put/seqid=0 2024-12-09T05:14:35,733 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-09T05:14:35,734 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T05:14:35,734 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:14:35,735 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:14:35,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741856_1032 (size=5037) 2024-12-09T05:14:35,735 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T05:14:35,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741856_1032 (size=5037) 2024-12-09T05:14:35,736 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/.tmp/info/01526085a1294bff8a28f6a049542527 2024-12-09T05:14:35,743 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/.tmp/info/01526085a1294bff8a28f6a049542527 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/info/01526085a1294bff8a28f6a049542527 2024-12-09T05:14:35,749 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/info/01526085a1294bff8a28f6a049542527, entries=2, sequenceid=6, filesize=4.9 K 2024-12-09T05:14:35,751 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for d537f55d10ac7a1ac262a45953418c2d in 39ms, sequenceid=6, compaction requested=false 2024-12-09T05:14:35,755 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/data/hbase/namespace/d537f55d10ac7a1ac262a45953418c2d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T05:14:35,756 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:14:35,756 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d537f55d10ac7a1ac262a45953418c2d: 2024-12-09T05:14:35,756 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733721167376.d537f55d10ac7a1ac262a45953418c2d. 2024-12-09T05:14:35,836 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,41921,1733721165481; all regions closed. 
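Each region close above is preceded by one final memstore flush, and the HRegion(3040) lines record its exact data size, heap size, duration and sequence id (42 ms / seqid 48 for the test region, 93 ms / seqid 14 for hbase:meta, 39 ms / seqid 6 for hbase:namespace). A rough way to tabulate those numbers, written only against the message format shown here and not against any HBase tooling:

import re
import sys

# Matches HRegion(3040):
#   "Finished flush of dataSize ~X KB/<bytes>, heapSize ~Y KB/<bytes>,
#    currentSize=... for <region> in <ms>ms, sequenceid=<seq>, ..."
FLUSH_RE = re.compile(
    r"Finished flush of dataSize ~[\d.]+ \w?B/(\d+),"
    r" heapSize ~[\d.]+ \w?B/(\d+),"
    r" currentSize=.*? for (\w+) in (\d+)ms, sequenceid=(\d+)"
)

def flush_summary(lines):
    # region encoded name -> (data bytes, heap bytes, millis, sequence id)
    out = {}
    for line in lines:
        for m in FLUSH_RE.finditer(line):
            data, heap, region, ms, seq = m.groups()
            out[region] = (int(data), int(heap), int(ms), int(seq))
    return out

if __name__ == "__main__":
    for region, (data, heap, ms, seq) in flush_summary(sys.stdin).items():
        print(f"{region}: {data} B data, {heap} B heap, {ms} ms, seqid={seq}")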
2024-12-09T05:14:35,840 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481 2024-12-09T05:14:35,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741834_1010 (size=4330) 2024-12-09T05:14:35,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741834_1010 (size=4330) 2024-12-09T05:14:35,846 DEBUG [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs 2024-12-09T05:14:35,846 INFO [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C41921%2C1733721165481.meta:.meta(num 1733721167121) 2024-12-09T05:14:35,847 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/WALs/41a709354867,41921,1733721165481 2024-12-09T05:14:35,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741851_1027 (size=13066) 2024-12-09T05:14:35,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741851_1027 (size=13066) 2024-12-09T05:14:35,855 DEBUG [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/oldWALs 2024-12-09T05:14:35,856 INFO [RS:0;41a709354867:41921 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C41921%2C1733721165481:(num 1733721255610) 2024-12-09T05:14:35,856 DEBUG [RS:0;41a709354867:41921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:14:35,856 INFO [RS:0;41a709354867:41921 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:14:35,856 INFO [RS:0;41a709354867:41921 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-09T05:14:35,856 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-09T05:14:35,857 INFO [RS:0;41a709354867:41921 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41921 2024-12-09T05:14:35,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,41921,1733721165481 2024-12-09T05:14:35,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:14:35,864 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,41921,1733721165481] 2024-12-09T05:14:35,864 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,41921,1733721165481; numProcessing=1 2024-12-09T05:14:35,865 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,41921,1733721165481 already deleted, retry=false 2024-12-09T05:14:35,865 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,41921,1733721165481 expired; onlineServers=0 2024-12-09T05:14:35,865 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,33145,1733721164634' ***** 2024-12-09T05:14:35,865 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:14:35,866 DEBUG [M:0;41a709354867:33145 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d4caf21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:14:35,866 INFO [M:0;41a709354867:33145 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,33145,1733721164634 2024-12-09T05:14:35,866 INFO [M:0;41a709354867:33145 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,33145,1733721164634; all regions closed. 2024-12-09T05:14:35,866 DEBUG [M:0;41a709354867:33145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:14:35,866 DEBUG [M:0;41a709354867:33145 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:14:35,866 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
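The ZKWatcher entries above are the cluster-shutdown handshake over ZooKeeper: the stopping region server's ephemeral znode under /hbase/rs is deleted, both watchers receive the NodeDeleted and NodeChildrenChanged events, and the master's RegionServerTracker processes the expiration of 41a709354867,41921,1733721165481 with onlineServers dropping to 0. A small extractor for just those event lines (assuming only the "Received ZooKeeper Event" wording used by ZKWatcher in this log):

import re
import sys

# Matches ZKWatcher: "Received ZooKeeper Event, type=<T>, state=<S>, path=<P>"
ZK_EVENT_RE = re.compile(
    r"Received ZooKeeper Event, type=(\w+), state=(\w+), path=(\S+)"
)

for line in sys.stdin:
    for m in ZK_EVENT_RE.finditer(line):
        ev_type, state, path = m.groups()
        print(f"{ev_type:<20} {state:<15} {path}")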
2024-12-09T05:14:35,866 DEBUG [M:0;41a709354867:33145 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:14:35,866 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721166404 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721166404,5,FailOnTimeoutGroup] 2024-12-09T05:14:35,866 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721166401 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721166401,5,FailOnTimeoutGroup] 2024-12-09T05:14:35,866 INFO [M:0;41a709354867:33145 {}] hbase.ChoreService(370): Chore service for: master/41a709354867:0 had [] on shutdown 2024-12-09T05:14:35,866 DEBUG [M:0;41a709354867:33145 {}] master.HMaster(1733): Stopping service threads 2024-12-09T05:14:35,867 INFO [M:0;41a709354867:33145 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:14:35,867 INFO [M:0;41a709354867:33145 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:14:35,867 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T05:14:35,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:14:35,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:35,868 DEBUG [M:0;41a709354867:33145 {}] zookeeper.ZKUtil(347): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:14:35,868 WARN [M:0;41a709354867:33145 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:14:35,868 INFO [M:0;41a709354867:33145 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T05:14:35,868 INFO [M:0;41a709354867:33145 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:14:35,868 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:14:35,868 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:14:35,868 INFO [M:0;41a709354867:33145 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:35,868 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:35,868 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T05:14:35,868 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:35,869 INFO [M:0;41a709354867:33145 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.21 KB heapSize=50.14 KB 2024-12-09T05:14:35,894 DEBUG [M:0;41a709354867:33145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c187d3b866a6400daab34eae58fe55ff is 82, key is hbase:meta,,1/info:regioninfo/1733721167249/Put/seqid=0 2024-12-09T05:14:35,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741857_1033 (size=5672) 2024-12-09T05:14:35,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741857_1033 (size=5672) 2024-12-09T05:14:35,902 INFO [M:0;41a709354867:33145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c187d3b866a6400daab34eae58fe55ff 2024-12-09T05:14:35,934 DEBUG [M:0;41a709354867:33145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/86da0eaa92b84a95862171f53f988cef is 766, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733721168727/Put/seqid=0 2024-12-09T05:14:35,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741858_1034 (size=6426) 2024-12-09T05:14:35,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741858_1034 (size=6426) 2024-12-09T05:14:35,942 INFO [M:0;41a709354867:33145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.61 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/86da0eaa92b84a95862171f53f988cef 2024-12-09T05:14:35,949 INFO [M:0;41a709354867:33145 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 86da0eaa92b84a95862171f53f988cef 2024-12-09T05:14:35,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:14:35,963 INFO [RS:0;41a709354867:41921 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,41921,1733721165481; zookeeper connection closed. 
2024-12-09T05:14:35,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41921-0x100753140d70001, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:14:35,964 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52c6c5e1 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52c6c5e1 2024-12-09T05:14:35,964 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T05:14:35,965 DEBUG [M:0;41a709354867:33145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/583331976cdc414e9d31908b2c5ee171 is 69, key is 41a709354867,41921,1733721165481/rs:state/1733721166493/Put/seqid=0 2024-12-09T05:14:35,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741859_1035 (size=5156) 2024-12-09T05:14:35,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741859_1035 (size=5156) 2024-12-09T05:14:35,972 INFO [M:0;41a709354867:33145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/583331976cdc414e9d31908b2c5ee171 2024-12-09T05:14:35,994 DEBUG [M:0;41a709354867:33145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a46a0c0969f4504824aa0d2ee3ee858 is 52, key is load_balancer_on/state:d/1733721168178/Put/seqid=0 2024-12-09T05:14:36,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741860_1036 (size=5056) 2024-12-09T05:14:36,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741860_1036 (size=5056) 2024-12-09T05:14:36,002 INFO [M:0;41a709354867:33145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a46a0c0969f4504824aa0d2ee3ee858 2024-12-09T05:14:36,009 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c187d3b866a6400daab34eae58fe55ff as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c187d3b866a6400daab34eae58fe55ff 2024-12-09T05:14:36,016 INFO [M:0;41a709354867:33145 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c187d3b866a6400daab34eae58fe55ff, entries=8, sequenceid=104, 
filesize=5.5 K 2024-12-09T05:14:36,017 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/86da0eaa92b84a95862171f53f988cef as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/86da0eaa92b84a95862171f53f988cef 2024-12-09T05:14:36,024 INFO [M:0;41a709354867:33145 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 86da0eaa92b84a95862171f53f988cef 2024-12-09T05:14:36,024 INFO [M:0;41a709354867:33145 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/86da0eaa92b84a95862171f53f988cef, entries=11, sequenceid=104, filesize=6.3 K 2024-12-09T05:14:36,025 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/583331976cdc414e9d31908b2c5ee171 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/583331976cdc414e9d31908b2c5ee171 2024-12-09T05:14:36,031 INFO [M:0;41a709354867:33145 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/583331976cdc414e9d31908b2c5ee171, entries=1, sequenceid=104, filesize=5.0 K 2024-12-09T05:14:36,032 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a46a0c0969f4504824aa0d2ee3ee858 as hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a46a0c0969f4504824aa0d2ee3ee858 2024-12-09T05:14:36,037 INFO [M:0;41a709354867:33145 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a46a0c0969f4504824aa0d2ee3ee858, entries=1, sequenceid=104, filesize=4.9 K 2024-12-09T05:14:36,039 INFO [M:0;41a709354867:33145 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.21 KB/41173, heapSize ~50.08 KB/51280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=104, compaction requested=false 2024-12-09T05:14:36,040 INFO [M:0;41a709354867:33145 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
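Throughout the flushes above, every new HFile shows up in a pair of namenode addStoredBlock messages, one per datanode (127.0.0.1:33923 and 127.0.0.1:42421), which is how this minicluster log confirms that each block reached both replicas. A sketch that tallies replicas per block id from those lines (again parsing only the message text shown here):

import re
import sys
from collections import defaultdict

# Matches BlockManager:
#   "BLOCK* addStoredBlock: <datanode> is added to <block-id> (size=<bytes>)"
BLOCK_RE = re.compile(
    r"addStoredBlock: (\S+) is added to (blk_\S+) \(size=(\d+)\)"
)

replicas = defaultdict(set)   # block id -> datanode addresses seen
sizes = {}                    # block id -> reported size in bytes

for line in sys.stdin:
    for m in BLOCK_RE.finditer(line):
        node, block, size = m.groups()
        replicas[block].add(node)
        sizes[block] = int(size)

for block in sorted(replicas):
    print(f"{block}: {len(replicas[block])} replica(s), {sizes[block]} bytes")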
2024-12-09T05:14:36,040 DEBUG [M:0;41a709354867:33145 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:14:36,041 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/MasterData/WALs/41a709354867,33145,1733721164634 2024-12-09T05:14:36,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33923 is added to blk_1073741830_1006 (size=48474) 2024-12-09T05:14:36,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741830_1006 (size=48474) 2024-12-09T05:14:36,044 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:14:36,044 INFO [M:0;41a709354867:33145 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-09T05:14:36,044 INFO [M:0;41a709354867:33145 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33145 2024-12-09T05:14:36,046 DEBUG [M:0;41a709354867:33145 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/41a709354867,33145,1733721164634 already deleted, retry=false 2024-12-09T05:14:36,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:14:36,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33145-0x100753140d70000, quorum=127.0.0.1:52383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:14:36,148 INFO [M:0;41a709354867:33145 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,33145,1733721164634; zookeeper connection closed. 2024-12-09T05:14:36,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77880d53{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:36,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7469b8b8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:14:36,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:14:36,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40635cd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:14:36,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ab144a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir/,STOPPED} 2024-12-09T05:14:36,160 WARN [BP-1669704492-172.17.0.2-1733721161171 heartbeating to localhost/127.0.0.1:33833 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:14:36,160 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:14:36,161 WARN [BP-1669704492-172.17.0.2-1733721161171 heartbeating to localhost/127.0.0.1:33833 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669704492-172.17.0.2-1733721161171 (Datanode Uuid b5f42ee2-9770-4b5b-b8e6-3795094a5e7b) service to localhost/127.0.0.1:33833 2024-12-09T05:14:36,161 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:14:36,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data3/current/BP-1669704492-172.17.0.2-1733721161171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:36,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data4/current/BP-1669704492-172.17.0.2-1733721161171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:36,163 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:14:36,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1707a3cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:36,165 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@359dcd6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:14:36,165 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:14:36,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@583669e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:14:36,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6eface5d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir/,STOPPED} 2024-12-09T05:14:36,167 WARN [BP-1669704492-172.17.0.2-1733721161171 heartbeating to localhost/127.0.0.1:33833 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:14:36,167 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:14:36,167 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:14:36,167 WARN [BP-1669704492-172.17.0.2-1733721161171 heartbeating to localhost/127.0.0.1:33833 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669704492-172.17.0.2-1733721161171 (Datanode Uuid 0828d983-17ca-4fea-a29b-64945bbde071) service to localhost/127.0.0.1:33833 2024-12-09T05:14:36,168 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data1/current/BP-1669704492-172.17.0.2-1733721161171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:36,169 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/cluster_55a5741f-dc1e-8fed-1f25-ffd4352cce64/dfs/data/data2/current/BP-1669704492-172.17.0.2-1733721161171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:36,169 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:14:36,180 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@dbae14b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:14:36,181 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d0329cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:14:36,181 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:14:36,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a2a2c8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:14:36,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a602904{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir/,STOPPED} 2024-12-09T05:14:36,189 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:14:36,226 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-09T05:14:36,234 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=63 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33833 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/41a709354867:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/41a709354867:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/41a709354867:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:33833 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: SessionTracker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)

Potentially hanging thread: nioEventLoopGroup-4-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:33833 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-4-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@15810e71
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:33833
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33833
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33833
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-1-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-5-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: nioEventLoopGroup-2-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33833
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: region-location-2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: regionserver/41a709354867:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:33833 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-5-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-2-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33833
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=406 (was 286) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=167 (was 278), ProcessCount=11 (was 11), AvailableMemoryMB=8774 (was 8530) - AvailableMemoryMB LEAK?
- 2024-12-09T05:14:36,242 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=64, OpenFileDescriptor=406, MaxFileDescriptor=1048576, SystemLoadAverage=167, ProcessCount=11, AvailableMemoryMB=8774 2024-12-09T05:14:36,242 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:14:36,242 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.log.dir so I do NOT create it in target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90 2024-12-09T05:14:36,242 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4fb5bf9d-4eda-9e61-9348-af9a4e84b794/hadoop.tmp.dir so I do NOT create it in target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780, deleteOnExit=true 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/test.cache.data in system properties and HBase conf 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T05:14:36,243 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T05:14:36,243 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:14:36,244 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:14:36,258 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:14:36,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:36,349 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:14:36,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:14:36,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:14:36,350 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:14:36,351 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:36,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@422490c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:14:36,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c54ccd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:14:36,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39e90629{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-44271-hadoop-hdfs-3_4_1-tests_jar-_-any-4343382132557149642/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:14:36,469 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e4a5ed5{HTTP/1.1, (http/1.1)}{localhost:44271} 2024-12-09T05:14:36,469 INFO [Time-limited test {}] server.Server(415): Started @118023ms 2024-12-09T05:14:36,483 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:14:36,561 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:36,565 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:14:36,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:14:36,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:14:36,568 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:14:36,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27867745{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:14:36,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ebcf9b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:14:36,571 INFO [regionserver/41a709354867:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:14:36,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36692090{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-34931-hadoop-hdfs-3_4_1-tests_jar-_-any-17853451789823914764/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:36,687 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e4d7c05{HTTP/1.1, (http/1.1)}{localhost:34931} 2024-12-09T05:14:36,687 INFO [Time-limited test {}] server.Server(415): Started @118240ms 2024-12-09T05:14:36,689 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:14:36,729 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:36,733 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:14:36,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:14:36,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:14:36,735 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:14:36,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@416f69b7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:14:36,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ead7a1a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:14:36,802 WARN [Thread-450 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data1/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:36,802 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data2/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:36,827 WARN [Thread-429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:14:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41ac650f6c2651e7 with lease ID 0xd6c99a4bb2bde8e0: Processing first storage report for DS-7fbf4c63-32af-429c-9350-fa24a258ab0b from datanode DatanodeRegistration(127.0.0.1:39683, datanodeUuid=1a113e70-79f6-49e8-828b-2d450525baf8, infoPort=43247, infoSecurePort=0, ipcPort=33617, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41ac650f6c2651e7 with lease ID 0xd6c99a4bb2bde8e0: from storage DS-7fbf4c63-32af-429c-9350-fa24a258ab0b node DatanodeRegistration(127.0.0.1:39683, datanodeUuid=1a113e70-79f6-49e8-828b-2d450525baf8, infoPort=43247, infoSecurePort=0, ipcPort=33617, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:14:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41ac650f6c2651e7 with lease ID 0xd6c99a4bb2bde8e0: Processing first storage report for DS-22119521-fabb-41b7-9345-61d2cd833f95 from datanode DatanodeRegistration(127.0.0.1:39683, datanodeUuid=1a113e70-79f6-49e8-828b-2d450525baf8, infoPort=43247, infoSecurePort=0, ipcPort=33617, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41ac650f6c2651e7 with lease ID 0xd6c99a4bb2bde8e0: from storage DS-22119521-fabb-41b7-9345-61d2cd833f95 node DatanodeRegistration(127.0.0.1:39683, datanodeUuid=1a113e70-79f6-49e8-828b-2d450525baf8, infoPort=43247, infoSecurePort=0, ipcPort=33617, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:36,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27178800{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-46565-hadoop-hdfs-3_4_1-tests_jar-_-any-16676967364435359900/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:36,873 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@431af123{HTTP/1.1, (http/1.1)}{localhost:46565} 2024-12-09T05:14:36,873 INFO [Time-limited test {}] server.Server(415): Started @118427ms 2024-12-09T05:14:36,876 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T05:14:37,003 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data3/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:37,003 WARN [Thread-477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data4/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:37,027 WARN [Thread-465 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:14:37,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe7ba30d16f88c33 with lease ID 0xd6c99a4bb2bde8e1: Processing first storage report for DS-51f684b6-5442-4769-a5b7-81ca384ca30c from datanode DatanodeRegistration(127.0.0.1:34889, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=40243, infoSecurePort=0, ipcPort=43517, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:37,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe7ba30d16f88c33 with lease ID 0xd6c99a4bb2bde8e1: from storage DS-51f684b6-5442-4769-a5b7-81ca384ca30c node DatanodeRegistration(127.0.0.1:34889, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=40243, infoSecurePort=0, ipcPort=43517, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:37,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe7ba30d16f88c33 with lease ID 0xd6c99a4bb2bde8e1: Processing first storage report for DS-7f4167d3-6738-47e8-b1b7-2244c6f43708 from datanode DatanodeRegistration(127.0.0.1:34889, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=40243, infoSecurePort=0, ipcPort=43517, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:37,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe7ba30d16f88c33 with lease ID 0xd6c99a4bb2bde8e1: from storage DS-7f4167d3-6738-47e8-b1b7-2244c6f43708 node DatanodeRegistration(127.0.0.1:34889, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=40243, infoSecurePort=0, ipcPort=43517, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:37,120 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90 2024-12-09T05:14:37,123 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/zookeeper_0, clientPort=55669, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:14:37,124 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55669 2024-12-09T05:14:37,125 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:14:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:14:37,139 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691 with version=8 2024-12-09T05:14:37,139 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase-staging 2024-12-09T05:14:37,142 INFO [Time-limited test {}] client.ConnectionUtils(129): master/41a709354867:0 server-side Connection retries=45 2024-12-09T05:14:37,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:37,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:37,142 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:14:37,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:37,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:14:37,142 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:14:37,142 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:14:37,143 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36393 2024-12-09T05:14:37,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,148 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36393 connecting to ZooKeeper ensemble=127.0.0.1:55669 2024-12-09T05:14:37,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:363930x0, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:14:37,156 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36393-0x1007532fba90000 connected 2024-12-09T05:14:37,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:14:37,172 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:14:37,172 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:14:37,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36393 2024-12-09T05:14:37,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36393 2024-12-09T05:14:37,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36393 2024-12-09T05:14:37,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36393 2024-12-09T05:14:37,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36393 2024-12-09T05:14:37,174 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691, hbase.cluster.distributed=false 2024-12-09T05:14:37,192 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:14:37,193 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:14:37,194 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36179 2024-12-09T05:14:37,194 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:14:37,195 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:14:37,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,201 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36179 connecting to ZooKeeper ensemble=127.0.0.1:55669 2024-12-09T05:14:37,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:361790x0, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:14:37,204 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36179-0x1007532fba90001 connected 2024-12-09T05:14:37,204 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:14:37,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:14:37,206 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:14:37,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36179 2024-12-09T05:14:37,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36179 2024-12-09T05:14:37,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36179 2024-12-09T05:14:37,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36179 2024-12-09T05:14:37,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36179 2024-12-09T05:14:37,209 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/41a709354867,36393,1733721277141 2024-12-09T05:14:37,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:14:37,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:14:37,212 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/41a709354867,36393,1733721277141 2024-12-09T05:14:37,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:14:37,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:14:37,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,215 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:14:37,215 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/41a709354867,36393,1733721277141 from backup master directory 2024-12-09T05:14:37,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:14:37,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/41a709354867,36393,1733721277141 2024-12-09T05:14:37,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:14:37,217 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): 
master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:14:37,217 WARN [master/41a709354867:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:14:37,217 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=41a709354867,36393,1733721277141 2024-12-09T05:14:37,226 DEBUG [M:0;41a709354867:36393 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;41a709354867:36393 2024-12-09T05:14:37,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:14:37,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:14:37,244 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/hbase.id with ID: 6601d4de-2f15-47cc-a8a0-9c7c60f15886 2024-12-09T05:14:37,258 INFO [master/41a709354867:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:37,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:14:37,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:14:37,271 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:14:37,273 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:14:37,273 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:14:37,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:14:37,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:14:37,284 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store 2024-12-09T05:14:37,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:14:37,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:14:37,293 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:37,293 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:14:37,293 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:37,293 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:37,293 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:14:37,293 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:37,293 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:14:37,293 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:14:37,294 WARN [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/.initializing 2024-12-09T05:14:37,294 DEBUG [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141 2024-12-09T05:14:37,298 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C36393%2C1733721277141, suffix=, logDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141, archiveDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/oldWALs, maxLogs=10 2024-12-09T05:14:37,298 INFO [master/41a709354867:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36393%2C1733721277141.1733721277298 2024-12-09T05:14:37,305 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 2024-12-09T05:14:37,305 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43247:43247),(127.0.0.1/127.0.0.1:40243:40243)] 2024-12-09T05:14:37,305 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:14:37,306 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:37,306 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,306 DEBUG [master/41a709354867:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:14:37,311 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:37,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:14:37,314 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:14:37,315 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:14:37,317 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:14:37,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,321 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:14:37,321 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,322 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:14:37,323 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,323 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,326 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:14:37,327 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:14:37,330 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:14:37,330 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762477, jitterRate=-0.030460968613624573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:14:37,331 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:14:37,332 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:14:37,337 DEBUG [master/41a709354867:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78d04f8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:14:37,338 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-09T05:14:37,338 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:14:37,338 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:14:37,338 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-09T05:14:37,339 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T05:14:37,339 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-09T05:14:37,339 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:14:37,341 INFO [master/41a709354867:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T05:14:37,342 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:14:37,344 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:14:37,344 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:14:37,345 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:14:37,346 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:14:37,346 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:14:37,347 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:14:37,348 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:14:37,349 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:14:37,350 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:14:37,351 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:14:37,352 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:14:37,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-09T05:14:37,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:14:37,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,354 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=41a709354867,36393,1733721277141, sessionid=0x1007532fba90000, setting cluster-up flag (Was=false) 2024-12-09T05:14:37,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,362 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:14:37,363 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,36393,1733721277141 2024-12-09T05:14:37,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,371 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:14:37,372 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,36393,1733721277141 2024-12-09T05:14:37,375 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T05:14:37,375 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T05:14:37,376 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, 
TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T05:14:37,376 DEBUG [master/41a709354867:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 41a709354867,36393,1733721277141 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:14:37,376 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:14:37,376 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:14:37,376 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:14:37,377 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:14:37,377 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/41a709354867:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:14:37,377 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,377 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:14:37,377 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,380 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733721307380 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:14:37,381 INFO 
[master/41a709354867:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:14:37,381 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,381 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:14:37,381 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:14:37,382 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:14:37,382 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:14:37,382 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:14:37,382 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:14:37,382 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721277382,5,FailOnTimeoutGroup] 2024-12-09T05:14:37,383 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,383 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:14:37,384 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721277382,5,FailOnTimeoutGroup] 2024-12-09T05:14:37,384 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,384 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:14:37,384 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,384 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:14:37,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:14:37,392 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T05:14:37,392 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691 2024-12-09T05:14:37,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:14:37,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:14:37,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:37,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:14:37,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:14:37,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:37,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:14:37,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:14:37,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:37,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:14:37,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:14:37,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:37,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:37,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/meta/1588230740 2024-12-09T05:14:37,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/meta/1588230740 2024-12-09T05:14:37,412 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:14:37,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:14:37,417 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:14:37,418 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752367, jitterRate=-0.04331618547439575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:14:37,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:14:37,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:14:37,419 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:14:37,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:14:37,420 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:14:37,420 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:14:37,420 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:14:37,420 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:14:37,422 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:14:37,422 INFO [PEWorker-1 
{}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T05:14:37,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:14:37,423 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:14:37,425 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:14:37,429 DEBUG [RS:0;41a709354867:36179 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;41a709354867:36179 2024-12-09T05:14:37,430 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1008): ClusterId : 6601d4de-2f15-47cc-a8a0-9c7c60f15886 2024-12-09T05:14:37,430 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:14:37,433 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:14:37,433 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:14:37,435 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:14:37,435 DEBUG [RS:0;41a709354867:36179 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a7b6a26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:14:37,436 DEBUG [RS:0;41a709354867:36179 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5212dd0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:14:37,436 INFO [RS:0;41a709354867:36179 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:14:37,436 INFO [RS:0;41a709354867:36179 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T05:14:37,436 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-09T05:14:37,437 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,36393,1733721277141 with isa=41a709354867/172.17.0.2:36179, startcode=1733721277192 2024-12-09T05:14:37,437 DEBUG [RS:0;41a709354867:36179 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:14:37,440 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54941, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:14:37,440 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36393 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,36179,1733721277192 2024-12-09T05:14:37,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36393 {}] master.ServerManager(486): Registering regionserver=41a709354867,36179,1733721277192 2024-12-09T05:14:37,442 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691 2024-12-09T05:14:37,442 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45367 2024-12-09T05:14:37,442 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:14:37,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:14:37,445 DEBUG [RS:0;41a709354867:36179 {}] zookeeper.ZKUtil(111): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,36179,1733721277192 2024-12-09T05:14:37,445 WARN [RS:0;41a709354867:36179 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T05:14:37,445 INFO [RS:0;41a709354867:36179 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:14:37,445 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192 2024-12-09T05:14:37,446 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,36179,1733721277192] 2024-12-09T05:14:37,449 DEBUG [RS:0;41a709354867:36179 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:14:37,450 INFO [RS:0;41a709354867:36179 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:14:37,452 INFO [RS:0;41a709354867:36179 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:14:37,453 INFO [RS:0;41a709354867:36179 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:14:37,453 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,456 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:14:37,457 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T05:14:37,457 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,457 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,457 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,457 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,457 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,458 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:14:37,458 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,458 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,458 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,458 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,459 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:37,459 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:14:37,459 DEBUG [RS:0;41a709354867:36179 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:14:37,459 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,459 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,459 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,459 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,459 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36179,1733721277192-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T05:14:37,477 INFO [RS:0;41a709354867:36179 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:14:37,477 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36179,1733721277192-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:37,493 INFO [RS:0;41a709354867:36179 {}] regionserver.Replication(204): 41a709354867,36179,1733721277192 started 2024-12-09T05:14:37,493 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,36179,1733721277192, RpcServer on 41a709354867/172.17.0.2:36179, sessionid=0x1007532fba90001 2024-12-09T05:14:37,493 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:14:37,493 DEBUG [RS:0;41a709354867:36179 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,36179,1733721277192 2024-12-09T05:14:37,493 DEBUG [RS:0;41a709354867:36179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,36179,1733721277192' 2024-12-09T05:14:37,493 DEBUG [RS:0;41a709354867:36179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:14:37,493 DEBUG [RS:0;41a709354867:36179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:14:37,494 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:14:37,494 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:14:37,494 DEBUG [RS:0;41a709354867:36179 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,36179,1733721277192 2024-12-09T05:14:37,494 DEBUG [RS:0;41a709354867:36179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,36179,1733721277192' 2024-12-09T05:14:37,494 DEBUG [RS:0;41a709354867:36179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:14:37,494 DEBUG [RS:0;41a709354867:36179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:14:37,495 DEBUG [RS:0;41a709354867:36179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:14:37,495 INFO [RS:0;41a709354867:36179 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:14:37,495 INFO [RS:0;41a709354867:36179 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:14:37,575 WARN [41a709354867:36393 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-09T05:14:37,598 INFO [RS:0;41a709354867:36179 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C36179%2C1733721277192, suffix=, logDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192, archiveDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/oldWALs, maxLogs=32 2024-12-09T05:14:37,601 INFO [RS:0;41a709354867:36179 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.1733721277601 2024-12-09T05:14:37,612 INFO [RS:0;41a709354867:36179 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 2024-12-09T05:14:37,612 DEBUG [RS:0;41a709354867:36179 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40243:40243),(127.0.0.1/127.0.0.1:43247:43247)] 2024-12-09T05:14:37,825 DEBUG [41a709354867:36393 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T05:14:37,826 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=41a709354867,36179,1733721277192 2024-12-09T05:14:37,827 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,36179,1733721277192, state=OPENING 2024-12-09T05:14:37,829 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:14:37,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:37,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=41a709354867,36179,1733721277192}] 2024-12-09T05:14:37,831 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:14:37,831 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:14:37,984 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,36179,1733721277192 2024-12-09T05:14:37,985 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:14:37,987 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:14:37,992 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T05:14:37,992 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:14:37,994 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C36179%2C1733721277192.meta, suffix=.meta, logDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192, archiveDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/oldWALs, maxLogs=32 2024-12-09T05:14:37,996 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta 2024-12-09T05:14:38,002 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta 2024-12-09T05:14:38,003 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40243:40243),(127.0.0.1/127.0.0.1:43247:43247)] 2024-12-09T05:14:38,003 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:14:38,003 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:14:38,003 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:14:38,003 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T05:14:38,003 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:14:38,003 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:38,004 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T05:14:38,004 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T05:14:38,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:14:38,007 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:14:38,007 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:38,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:14:38,009 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:14:38,009 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:38,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:14:38,010 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:14:38,010 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:14:38,012 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/meta/1588230740 2024-12-09T05:14:38,013 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/meta/1588230740 2024-12-09T05:14:38,015 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
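
The CompactionConfiguration dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, weekly major period) reflect the usual compaction tuning keys at their defaults. The sketch below shows, under that assumption, how those values would be set explicitly; it is illustrative only and is not taken from this test's source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Minimum / maximum number of store files selected for a minor compaction
            // (the "minFilesToCompact:3, maxFilesToCompact:10" values in the log).
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);

            // Selection ratios: 1.2 normally, 5.0 during off-peak hours.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

            // Major compaction period of one week, in milliseconds (604800000 in the log).
            conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);
        }
    }
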
2024-12-09T05:14:38,017 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:14:38,018 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734246, jitterRate=-0.06635913252830505}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:14:38,019 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:14:38,020 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733721277984 2024-12-09T05:14:38,023 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:14:38,023 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T05:14:38,024 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,36179,1733721277192 2024-12-09T05:14:38,025 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,36179,1733721277192, state=OPEN 2024-12-09T05:14:38,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:14:38,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:14:38,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:14:38,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:14:38,033 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:14:38,033 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=41a709354867,36179,1733721277192 in 199 msec 2024-12-09T05:14:38,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:14:38,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-12-09T05:14:38,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 662 msec 2024-12-09T05:14:38,038 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733721278038, completionTime=-1 2024-12-09T05:14:38,038 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T05:14:38,038 DEBUG [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T05:14:38,039 DEBUG [hconnection-0xb5a3501-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:14:38,040 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:14:38,042 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T05:14:38,042 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733721338042 2024-12-09T05:14:38,042 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733721398042 2024-12-09T05:14:38,042 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-09T05:14:38,049 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36393,1733721277141-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,049 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36393,1733721277141-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,049 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36393,1733721277141-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,049 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-41a709354867:36393, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,049 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,050 INFO [master/41a709354867:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-09T05:14:38,050 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:14:38,051 DEBUG [master/41a709354867:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T05:14:38,051 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T05:14:38,053 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:14:38,053 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,054 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:14:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:14:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:14:38,065 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b115246a15fd3345eb9dae2059e50f32, NAME => 'hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691 2024-12-09T05:14:38,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:14:38,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:14:38,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:38,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing b115246a15fd3345eb9dae2059e50f32, disabling compactions & flushes 2024-12-09T05:14:38,073 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:14:38,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:14:38,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. after waiting 0 ms 2024-12-09T05:14:38,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:14:38,073 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:14:38,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for b115246a15fd3345eb9dae2059e50f32: 2024-12-09T05:14:38,075 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:14:38,075 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733721278075"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721278075"}]},"ts":"1733721278075"} 2024-12-09T05:14:38,078 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T05:14:38,079 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:14:38,080 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721278079"}]},"ts":"1733721278079"} 2024-12-09T05:14:38,082 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T05:14:38,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=b115246a15fd3345eb9dae2059e50f32, ASSIGN}] 2024-12-09T05:14:38,088 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=b115246a15fd3345eb9dae2059e50f32, ASSIGN 2024-12-09T05:14:38,089 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=b115246a15fd3345eb9dae2059e50f32, ASSIGN; state=OFFLINE, location=41a709354867,36179,1733721277192; forceNewPlan=false, retain=false 2024-12-09T05:14:38,239 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=b115246a15fd3345eb9dae2059e50f32, regionState=OPENING, regionLocation=41a709354867,36179,1733721277192 2024-12-09T05:14:38,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure b115246a15fd3345eb9dae2059e50f32, server=41a709354867,36179,1733721277192}] 2024-12-09T05:14:38,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,36179,1733721277192 2024-12-09T05:14:38,401 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:14:38,401 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => b115246a15fd3345eb9dae2059e50f32, NAME => 'hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:14:38,401 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,401 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:38,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,403 INFO [StoreOpener-b115246a15fd3345eb9dae2059e50f32-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,405 INFO [StoreOpener-b115246a15fd3345eb9dae2059e50f32-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b115246a15fd3345eb9dae2059e50f32 columnFamilyName info 2024-12-09T05:14:38,405 DEBUG [StoreOpener-b115246a15fd3345eb9dae2059e50f32-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,406 INFO [StoreOpener-b115246a15fd3345eb9dae2059e50f32-1 {}] regionserver.HStore(327): Store=b115246a15fd3345eb9dae2059e50f32/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:14:38,407 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,407 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,410 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:14:38,412 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:14:38,413 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened b115246a15fd3345eb9dae2059e50f32; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848237, jitterRate=0.07858984172344208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:14:38,414 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for b115246a15fd3345eb9dae2059e50f32: 2024-12-09T05:14:38,415 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32., pid=6, masterSystemTime=1733721278396 2024-12-09T05:14:38,417 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:14:38,417 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 
2024-12-09T05:14:38,418 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=b115246a15fd3345eb9dae2059e50f32, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,36179,1733721277192 2024-12-09T05:14:38,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:14:38,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure b115246a15fd3345eb9dae2059e50f32, server=41a709354867,36179,1733721277192 in 178 msec 2024-12-09T05:14:38,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:14:38,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=b115246a15fd3345eb9dae2059e50f32, ASSIGN in 336 msec 2024-12-09T05:14:38,426 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:14:38,427 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721278426"}]},"ts":"1733721278426"} 2024-12-09T05:14:38,428 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T05:14:38,431 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:14:38,433 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 381 msec 2024-12-09T05:14:38,453 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T05:14:38,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:14:38,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:38,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:14:38,460 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T05:14:38,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:14:38,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 12 msec 2024-12-09T05:14:38,482 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T05:14:38,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:14:38,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 11 msec 2024-12-09T05:14:38,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T05:14:38,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.291sec 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:14:38,509 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36393,1733721277141-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:14:38,510 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36393,1733721277141-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:14:38,512 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x193d035d to 127.0.0.1:55669 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@563fb1f 2024-12-09T05:14:38,512 DEBUG [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:14:38,512 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:14:38,512 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36393,1733721277141-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
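
At this point the master has completed initialization and the test opens a client connection to the minicluster (the entries that follow show the RPC client setup and a balanceSwitch=false request). A minimal sketch of the equivalent client-side steps with the HBase 2.x API is shown below; the ZooKeeper quorum 127.0.0.1:55669 is the ensemble used throughout this log, and the snippet is illustrative rather than the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectSketch {
        public static void main(String[] args) throws Exception {
            // Point the client at the test ZooKeeper ensemble seen in this log.
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.setInt("hbase.zookeeper.property.clientPort", 55669);

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Corresponds to the "set balanceSwitch=false" request recorded a few entries later.
                admin.balancerSwitch(false, true);
            }
        }
    }
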
2024-12-09T05:14:38,515 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18736e4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:14:38,517 DEBUG [hconnection-0x70ba33c5-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:14:38,519 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:14:38,521 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=41a709354867,36393,1733721277141 2024-12-09T05:14:38,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:38,525 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-09T05:14:38,549 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:14:38,550 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:14:38,551 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41493 2024-12-09T05:14:38,551 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:14:38,552 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:14:38,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:38,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:14:38,558 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41493 connecting to ZooKeeper ensemble=127.0.0.1:55669 2024-12-09T05:14:38,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:414930x0, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:14:38,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:14:38,561 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41493-0x1007532fba90003 connected 2024-12-09T05:14:38,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T05:14:38,563 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:14:38,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41493 2024-12-09T05:14:38,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41493 2024-12-09T05:14:38,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41493 2024-12-09T05:14:38,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41493 2024-12-09T05:14:38,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41493 2024-12-09T05:14:38,569 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T05:14:38,581 DEBUG [RS:1;41a709354867:41493 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;41a709354867:41493 2024-12-09T05:14:38,582 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1008): ClusterId : 6601d4de-2f15-47cc-a8a0-9c7c60f15886 2024-12-09T05:14:38,582 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:14:38,585 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:14:38,585 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:14:38,587 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:14:38,588 DEBUG [RS:1;41a709354867:41493 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b0b71f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:14:38,588 DEBUG [RS:1;41a709354867:41493 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a818c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:14:38,588 INFO [RS:1;41a709354867:41493 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:14:38,588 INFO [RS:1;41a709354867:41493 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T05:14:38,588 DEBUG [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-09T05:14:38,589 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,36393,1733721277141 with isa=41a709354867/172.17.0.2:41493, startcode=1733721278549 2024-12-09T05:14:38,589 DEBUG [RS:1;41a709354867:41493 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:14:38,591 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35993, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:14:38,591 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36393 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,41493,1733721278549 2024-12-09T05:14:38,591 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36393 {}] master.ServerManager(486): Registering regionserver=41a709354867,41493,1733721278549 2024-12-09T05:14:38,593 DEBUG [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691 2024-12-09T05:14:38,593 DEBUG [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45367 2024-12-09T05:14:38,593 DEBUG [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:14:38,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:14:38,595 DEBUG [RS:1;41a709354867:41493 {}] zookeeper.ZKUtil(111): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,41493,1733721278549 2024-12-09T05:14:38,595 WARN [RS:1;41a709354867:41493 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T05:14:38,595 INFO [RS:1;41a709354867:41493 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:14:38,596 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,41493,1733721278549] 2024-12-09T05:14:38,596 DEBUG [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,41493,1733721278549 2024-12-09T05:14:38,600 DEBUG [RS:1;41a709354867:41493 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:14:38,601 INFO [RS:1;41a709354867:41493 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:14:38,603 INFO [RS:1;41a709354867:41493 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:14:38,603 INFO [RS:1;41a709354867:41493 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:14:38,603 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,604 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:14:38,605 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:14:38,605 DEBUG [RS:1;41a709354867:41493 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:14:38,606 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,606 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,606 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,606 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,606 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,41493,1733721278549-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T05:14:38,624 INFO [RS:1;41a709354867:41493 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:14:38,624 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,41493,1733721278549-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:14:38,639 INFO [RS:1;41a709354867:41493 {}] regionserver.Replication(204): 41a709354867,41493,1733721278549 started 2024-12-09T05:14:38,640 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,41493,1733721278549, RpcServer on 41a709354867/172.17.0.2:41493, sessionid=0x1007532fba90003 2024-12-09T05:14:38,640 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:14:38,640 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;41a709354867:41493,5,FailOnTimeoutGroup] 2024-12-09T05:14:38,640 DEBUG [RS:1;41a709354867:41493 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,41493,1733721278549 2024-12-09T05:14:38,640 DEBUG [RS:1;41a709354867:41493 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,41493,1733721278549' 2024-12-09T05:14:38,640 DEBUG [RS:1;41a709354867:41493 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:14:38,640 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2 2024-12-09T05:14:38,640 DEBUG [RS:1;41a709354867:41493 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:14:38,641 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:14:38,641 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:14:38,641 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:14:38,641 DEBUG [RS:1;41a709354867:41493 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,41493,1733721278549 2024-12-09T05:14:38,641 DEBUG [RS:1;41a709354867:41493 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,41493,1733721278549' 2024-12-09T05:14:38,641 DEBUG [RS:1;41a709354867:41493 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:14:38,641 DEBUG [RS:1;41a709354867:41493 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:14:38,642 DEBUG [RS:1;41a709354867:41493 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:14:38,642 INFO [RS:1;41a709354867:41493 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:14:38,642 INFO [RS:1;41a709354867:41493 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T05:14:38,643 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:14:38,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T05:14:38,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T05:14:38,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:14:38,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T05:14:38,647 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:14:38,648 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-09T05:14:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:14:38,649 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:14:38,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741837_1013 (size=393) 2024-12-09T05:14:38,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741837_1013 (size=393) 2024-12-09T05:14:38,659 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 688aa77f4712fd33e61f733d63bfbd0a, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691 2024-12-09T05:14:38,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39683 is added to blk_1073741838_1014 (size=76) 2024-12-09T05:14:38,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34889 is added to blk_1073741838_1014 (size=76) 2024-12-09T05:14:38,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:38,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 688aa77f4712fd33e61f733d63bfbd0a, disabling compactions & flushes 2024-12-09T05:14:38,667 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:38,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:38,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. after waiting 0 ms 2024-12-09T05:14:38,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:38,667 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:38,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 688aa77f4712fd33e61f733d63bfbd0a: 2024-12-09T05:14:38,668 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:14:38,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733721278668"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721278668"}]},"ts":"1733721278668"} 2024-12-09T05:14:38,671 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
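
The create request above describes the TestLogRolling-testLogRollOnDatanodeDeath table: a single 'info' family with VERSIONS => '1', 64 KB blocks and the block cache enabled. Below is a minimal sketch of how a client could build an equivalent descriptor with the HBase 2.x builder API; the actual test code is not shown in this log, so the snippet is an assumption-labelled illustration of that shape only (it omits the small MAX_FILESIZE and flush-size overrides that triggered the TableDescriptorChecker warnings earlier).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Single 'info' family matching the descriptor above:
                // one version kept, 64 KB blocks, block cache enabled.
                ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)
                    .setBlocksize(64 * 1024)
                    .setBlockCacheEnabled(true)
                    .build();

                TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                    .setColumnFamily(info)
                    .build();

                admin.createTable(table);
            }
        }
    }
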
2024-12-09T05:14:38,672 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:14:38,672 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721278672"}]},"ts":"1733721278672"} 2024-12-09T05:14:38,674 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T05:14:38,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=688aa77f4712fd33e61f733d63bfbd0a, ASSIGN}] 2024-12-09T05:14:38,679 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=688aa77f4712fd33e61f733d63bfbd0a, ASSIGN 2024-12-09T05:14:38,680 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=688aa77f4712fd33e61f733d63bfbd0a, ASSIGN; state=OFFLINE, location=41a709354867,36179,1733721277192; forceNewPlan=false, retain=false 2024-12-09T05:14:38,745 INFO [RS:1;41a709354867:41493 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C41493%2C1733721278549, suffix=, logDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,41493,1733721278549, archiveDir=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/oldWALs, maxLogs=32 2024-12-09T05:14:38,746 INFO [RS:1;41a709354867:41493 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41493%2C1733721278549.1733721278746 2024-12-09T05:14:38,753 INFO [RS:1;41a709354867:41493 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,41493,1733721278549/41a709354867%2C41493%2C1733721278549.1733721278746 2024-12-09T05:14:38,753 DEBUG [RS:1;41a709354867:41493 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40243:40243),(127.0.0.1/127.0.0.1:43247:43247)] 2024-12-09T05:14:38,831 INFO [41a709354867:36393 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T05:14:38,832 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=688aa77f4712fd33e61f733d63bfbd0a, regionState=OPENING, regionLocation=41a709354867,36179,1733721277192 2024-12-09T05:14:38,834 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 688aa77f4712fd33e61f733d63bfbd0a, server=41a709354867,36179,1733721277192}] 2024-12-09T05:14:38,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 41a709354867,36179,1733721277192 2024-12-09T05:14:38,992 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:38,993 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 688aa77f4712fd33e61f733d63bfbd0a, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:14:38,994 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:38,994 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:14:38,994 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:38,994 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:38,996 INFO [StoreOpener-688aa77f4712fd33e61f733d63bfbd0a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:38,998 INFO [StoreOpener-688aa77f4712fd33e61f733d63bfbd0a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 688aa77f4712fd33e61f733d63bfbd0a columnFamilyName info 2024-12-09T05:14:38,998 DEBUG [StoreOpener-688aa77f4712fd33e61f733d63bfbd0a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:14:38,998 INFO [StoreOpener-688aa77f4712fd33e61f733d63bfbd0a-1 {}] regionserver.HStore(327): Store=688aa77f4712fd33e61f733d63bfbd0a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:14:38,999 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:38,999 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:39,002 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:14:39,004 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:14:39,005 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 688aa77f4712fd33e61f733d63bfbd0a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751387, jitterRate=-0.04456326365470886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:14:39,006 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 688aa77f4712fd33e61f733d63bfbd0a: 2024-12-09T05:14:39,007 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a., pid=11, masterSystemTime=1733721278987 2024-12-09T05:14:39,009 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:39,009 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 
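With the region reported OPEN at openSeqNum=2, client writes can start flowing into it; every Put is appended to the hosting region server's WAL before it is acknowledged, which is exactly the path the datanode-death scenario later in this section stresses. A minimal client-side sketch — the row key, qualifier and value are illustrative, not from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteToTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
      // The Put is appended to the region server's WAL before being acknowledged,
      // so the health of the WAL's HDFS pipeline directly gates this write.
      Put put = new Put(Bytes.toBytes("row-0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}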
2024-12-09T05:14:39,010 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=688aa77f4712fd33e61f733d63bfbd0a, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,36179,1733721277192 2024-12-09T05:14:39,014 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T05:14:39,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 688aa77f4712fd33e61f733d63bfbd0a, server=41a709354867,36179,1733721277192 in 178 msec 2024-12-09T05:14:39,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T05:14:39,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=688aa77f4712fd33e61f733d63bfbd0a, ASSIGN in 337 msec 2024-12-09T05:14:39,018 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:14:39,018 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721279018"}]},"ts":"1733721279018"} 2024-12-09T05:14:39,020 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T05:14:39,023 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:14:39,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 378 msec 2024-12-09T05:14:40,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:40,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:41,234 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:14:41,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:41,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:43,450 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T05:14:43,451 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-09T05:14:43,451 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-09T05:14:45,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T05:14:45,188 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-09T05:14:45,190 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T05:14:48,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36393 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:14:48,650 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-09T05:14:48,654 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T05:14:48,654 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:14:48,667 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:48,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:14:48,672 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:14:48,672 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:14:48,672 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:14:48,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26b0ac62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:14:48,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463114d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:14:48,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3cfce79{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-44853-hadoop-hdfs-3_4_1-tests_jar-_-any-7871982469653762886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:48,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31d71545{HTTP/1.1, (http/1.1)}{localhost:44853} 2024-12-09T05:14:48,789 INFO [Time-limited test {}] server.Server(415): Started @130343ms 2024-12-09T05:14:48,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:14:48,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:48,831 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:14:48,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:14:48,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:14:48,832 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:14:48,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bef1398{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:14:48,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5432f05f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:14:48,889 WARN [Thread-632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data5/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:48,889 WARN [Thread-633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data6/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:48,913 WARN [Thread-612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:14:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x366bc8bb83bdd738 with lease ID 0xd6c99a4bb2bde8e2: Processing first storage report for DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef from datanode DatanodeRegistration(127.0.0.1:37249, datanodeUuid=4433ab30-befb-4229-b7f1-d08e1c3b9e59, infoPort=38829, infoSecurePort=0, ipcPort=44361, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x366bc8bb83bdd738 with lease ID 0xd6c99a4bb2bde8e2: from storage DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef node DatanodeRegistration(127.0.0.1:37249, datanodeUuid=4433ab30-befb-4229-b7f1-d08e1c3b9e59, infoPort=38829, infoSecurePort=0, ipcPort=44361, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x366bc8bb83bdd738 with lease ID 0xd6c99a4bb2bde8e2: Processing first storage report for DS-8f98997b-0f43-45e1-9ccb-c12efa606fcc from datanode DatanodeRegistration(127.0.0.1:37249, datanodeUuid=4433ab30-befb-4229-b7f1-d08e1c3b9e59, infoPort=38829, infoSecurePort=0, ipcPort=44361, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x366bc8bb83bdd738 with lease ID 0xd6c99a4bb2bde8e2: from storage DS-8f98997b-0f43-45e1-9ccb-c12efa606fcc node DatanodeRegistration(127.0.0.1:37249, datanodeUuid=4433ab30-befb-4229-b7f1-d08e1c3b9e59, infoPort=38829, infoSecurePort=0, ipcPort=44361, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:48,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ebcc8b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-43489-hadoop-hdfs-3_4_1-tests_jar-_-any-4788074087035487175/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:48,958 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76ab817c{HTTP/1.1, (http/1.1)}{localhost:43489} 2024-12-09T05:14:48,958 INFO [Time-limited test {}] server.Server(415): Started @130512ms 2024-12-09T05:14:48,960 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:14:48,993 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:14:48,996 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:14:48,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:14:48,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:14:48,997 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:14:48,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26ff292e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:14:48,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@462ef7f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:14:49,056 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data7/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:49,057 WARN [Thread-668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data8/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:49,075 WARN [Thread-647 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:14:49,078 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9f5067472a7c5dc with lease ID 0xd6c99a4bb2bde8e3: Processing first storage report for DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291 from datanode DatanodeRegistration(127.0.0.1:44725, datanodeUuid=3622971f-b3c7-476a-a818-257b60d414f3, infoPort=39905, infoSecurePort=0, ipcPort=40905, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:49,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9f5067472a7c5dc with lease ID 0xd6c99a4bb2bde8e3: from storage DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291 node DatanodeRegistration(127.0.0.1:44725, datanodeUuid=3622971f-b3c7-476a-a818-257b60d414f3, infoPort=39905, infoSecurePort=0, ipcPort=40905, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:14:49,078 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9f5067472a7c5dc with lease ID 0xd6c99a4bb2bde8e3: Processing first storage report for DS-91a080c5-85e9-458f-baed-a7f8611b805a from datanode DatanodeRegistration(127.0.0.1:44725, datanodeUuid=3622971f-b3c7-476a-a818-257b60d414f3, infoPort=39905, infoSecurePort=0, ipcPort=40905, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:49,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9f5067472a7c5dc with lease ID 0xd6c99a4bb2bde8e3: from storage DS-91a080c5-85e9-458f-baed-a7f8611b805a node DatanodeRegistration(127.0.0.1:44725, datanodeUuid=3622971f-b3c7-476a-a818-257b60d414f3, infoPort=39905, infoSecurePort=0, ipcPort=40905, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:49,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77a342b4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-45327-hadoop-hdfs-3_4_1-tests_jar-_-any-6734736277472315015/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:49,119 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ce13758{HTTP/1.1, (http/1.1)}{localhost:45327} 2024-12-09T05:14:49,119 INFO [Time-limited test {}] server.Server(415): Started @130673ms 2024-12-09T05:14:49,121 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T05:14:49,221 WARN [Thread-693 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:49,221 WARN [Thread-694 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10/current/BP-836600810-172.17.0.2-1733721276277/current, will proceed with Du for space computation calculation, 2024-12-09T05:14:49,239 WARN [Thread-682 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:14:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67356ba4b951314 with lease ID 0xd6c99a4bb2bde8e4: Processing first storage report for DS-3bb1b516-bb45-4ddd-be62-6728b7e76145 from datanode DatanodeRegistration(127.0.0.1:38239, datanodeUuid=8bc934f7-5434-4876-97e3-9b395251647a, infoPort=36507, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67356ba4b951314 with lease ID 0xd6c99a4bb2bde8e4: from storage DS-3bb1b516-bb45-4ddd-be62-6728b7e76145 node DatanodeRegistration(127.0.0.1:38239, datanodeUuid=8bc934f7-5434-4876-97e3-9b395251647a, infoPort=36507, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:49,243 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67356ba4b951314 with lease ID 0xd6c99a4bb2bde8e4: Processing first storage report for DS-223624ef-3c60-4cdc-8f0d-621d5bdbc402 from datanode DatanodeRegistration(127.0.0.1:38239, datanodeUuid=8bc934f7-5434-4876-97e3-9b395251647a, infoPort=36507, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277) 2024-12-09T05:14:49,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67356ba4b951314 with lease ID 0xd6c99a4bb2bde8e4: from storage DS-223624ef-3c60-4cdc-8f0d-621d5bdbc402 node DatanodeRegistration(127.0.0.1:38239, datanodeUuid=8bc934f7-5434-4876-97e3-9b395251647a, infoPort=36507, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:14:49,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27178800{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:49,248 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@431af123{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:14:49,248 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:14:49,248 INFO [Time-limited 
test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ead7a1a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:14:49,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@416f69b7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:14:49,251 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:14:49,245 WARN [ResponseProcessor for block BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,251 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836600810-172.17.0.2-1733721276277 (Datanode Uuid c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38) service to localhost/127.0.0.1:45367 2024-12-09T05:14:49,245 WARN [ResponseProcessor for block BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,245 WARN [ResponseProcessor for block BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
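The extra datanodes brought up at 05:14:48-49 give the block pool spare capacity, and the EOFException / "Ending block pool service" entries that follow mark one of the original datanodes being shut down underneath the open WAL streams. A rough sketch of how a test built on HBaseTestingUtility and MiniDFSCluster can stage that sequence, assuming the usual mini-cluster helpers (startDataNodes/stopDataNode) behave as in recent Hadoop releases:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                      // ZK + HDFS + HBase master/regionserver
    MiniDFSCluster dfs = util.getDFSCluster();

    // Bring up spare datanodes so pipeline recovery has somewhere to go ...
    dfs.startDataNodes(util.getConfiguration(), 3, true, null, null);
    dfs.waitActive();
    // ... then stop one of the original datanodes while WAL streams are still open,
    // which produces the EOFException / "is bad" pipeline-recovery entries above.
    dfs.stopDataNode(0);

    util.shutdownMiniCluster();
  }
}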
2024-12-09T05:14:49,251 WARN [ResponseProcessor for block BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,252 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 block BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK], DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:14:49,253 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data4/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:49,253 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta block BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:14:49,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:51972 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39683:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51972 dst: /127.0.0.1:39683 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,253 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:14:49,252 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T05:14:49,251 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,41493,1733721278549/41a709354867%2C41493%2C1733721278549.1733721278746 block BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:14:49,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:54236 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54236 dst: /127.0.0.1:34889 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49770 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,252 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 block BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:14:49,253 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:14:49,252 WARN [PacketResponder: BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34889] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:51966 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39683:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51966 dst: /127.0.0.1:39683 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1510708481_22 at /127.0.0.1:54198 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54198 dst: /127.0.0.1:34889 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49776 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
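The "Error Recovery for ... in pipeline" and "datanode ... is bad" entries are the HDFS client dropping the failed node from each write pipeline. Whether it then asks the namenode for a replacement datanode, or keeps writing to the survivors, is governed by the client-side replace-datanode-on-failure settings; a hedged sketch of those keys, with the names believed to be the standard HDFS ones:

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow the client to request a replacement datanode when a pipeline node fails mid-write.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT replaces only under certain conditions (e.g. replication >= 3);
    // ALWAYS and NEVER force the behaviour either way.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // With best-effort, keep writing to the surviving nodes if no replacement is found,
    // instead of failing the output stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
  }
}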
2024-12-09T05:14:49,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-374856081_22 at /127.0.0.1:54290 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:34889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54290 dst: /127.0.0.1:34889 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49503 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:54234 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54234 dst: /127.0.0.1:34889 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49757 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1510708481_22 at /127.0.0.1:51954 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39683:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51954 dst: /127.0.0.1:39683 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-374856081_22 at /127.0.0.1:52016 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:39683:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52016 dst: /127.0.0.1:39683 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:14:49,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data3/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:49,259 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 block BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36692090{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:14:49,261 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,41493,1733721278549/41a709354867%2C41493%2C1733721278549.1733721278746 block BP-836600810-172.17.0.2-1733721276277:blk_1073741839_1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,261 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 block BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,261 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta block BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,262 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e4d7c05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:14:49,262 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:14:49,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ebcf9b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:14:49,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27867745{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:14:49,264 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:14:49,264 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:14:49,264 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836600810-172.17.0.2-1733721276277 (Datanode Uuid 1a113e70-79f6-49e8-828b-2d450525baf8) service to localhost/127.0.0.1:45367 2024-12-09T05:14:49,264 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:14:49,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data1/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:49,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data2/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:14:49,265 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:14:49,270 WARN [RS:0;41a709354867:36179.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,270 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36179%2C1733721277192:(num 1733721277601) roll requested 2024-12-09T05:14:49,270 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.1733721289270 2024-12-09T05:14:49,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36179 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36179 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37968 deadline: 1733721299269, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-09T05:14:49,274 WARN [Thread-704 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1020 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,274 WARN [Thread-704 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK], DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]) is bad. 
2024-12-09T05:14:49,274 WARN [Thread-704 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741840_1020 2024-12-09T05:14:49,277 WARN [Thread-704 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK] 2024-12-09T05:14:49,284 WARN [regionserver/41a709354867:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-09T05:14:49,284 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721289270 2024-12-09T05:14:49,285 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39905:39905),(127.0.0.1/127.0.0.1:38829:38829)] 2024-12-09T05:14:49,285 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:14:49,285 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:14:49,285 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T05:14:49,286 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-09T05:14:49,286 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-09T05:14:49,286 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 2024-12-09T05:14:49,289 WARN [IPC Server handler 1 on default port 45367 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741833_1009 2024-12-09T05:14:49,291 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 after 5ms 2024-12-09T05:14:50,693 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:14:50,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:50,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:50,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:50,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:14:53,292 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 after 4006ms 2024-12-09T05:15:01,384 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721289270 2024-12-09T05:15:01,385 WARN [ResponseProcessor for block BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:01,385 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721289270 block BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK], DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]) is bad. 2024-12-09T05:15:01,386 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:39802 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39802 dst: /127.0.0.1:44725 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:01,386 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:57732 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57732 dst: /127.0.0.1:37249 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:01,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ebcc8b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:01,388 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76ab817c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:01,388 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:01,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5432f05f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:01,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bef1398{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:01,393 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:15:01,393 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:15:01,393 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:15:01,393 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836600810-172.17.0.2-1733721276277 (Datanode Uuid 3622971f-b3c7-476a-a818-257b60d414f3) service to localhost/127.0.0.1:45367 2024-12-09T05:15:01,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data7/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:01,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data8/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:01,394 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:15:01,397 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]] 2024-12-09T05:15:01,397 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]] 2024-12-09T05:15:01,397 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36179%2C1733721277192:(num 1733721289270) roll requested 2024-12-09T05:15:01,397 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.1733721301397 2024-12-09T05:15:01,402 WARN [Thread-714 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39683 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:01,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41094 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data6]'}, localName='127.0.0.1:37249', datanodeUuid='4433ab30-befb-4229-b7f1-d08e1c3b9e59', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741842_1024 to mirror 127.0.0.1:39683 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:01,402 WARN [Thread-714 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK], DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]) is bad. 2024-12-09T05:15:01,402 WARN [Thread-714 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741842_1024 2024-12-09T05:15:01,402 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41094 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T05:15:01,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41094 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41094 dst: /127.0.0.1:37249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:01,403 WARN [Thread-714 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK] 2024-12-09T05:15:01,404 WARN [Thread-714 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:01,404 WARN [Thread-714 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK], DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]) is bad. 2024-12-09T05:15:01,404 WARN [Thread-714 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741843_1025 2024-12-09T05:15:01,405 WARN [Thread-714 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK] 2024-12-09T05:15:01,407 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41104 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741844_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data6]'}, localName='127.0.0.1:37249', datanodeUuid='4433ab30-befb-4229-b7f1-d08e1c3b9e59', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741844_1026 to mirror 127.0.0.1:34889 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:01,407 WARN [Thread-714 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34889 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:01,408 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41104 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741844_1026] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T05:15:01,408 WARN [Thread-714 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK], DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:15:01,408 WARN [Thread-714 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741844_1026 2024-12-09T05:15:01,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41104 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41104 dst: /127.0.0.1:37249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:01,408 WARN [Thread-714 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK] 2024-12-09T05:15:01,414 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721289270 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721301397 2024-12-09T05:15:01,415 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36507:36507),(127.0.0.1/127.0.0.1:38829:38829)] 2024-12-09T05:15:01,415 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:15:01,415 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721289270 is not closed yet, will try archiving it next time 2024-12-09T05:15:01,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37249 is added to blk_1073741841_1023 (size=2431) 2024-12-09T05:15:01,817 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:15:03,929 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6b5fb62e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37249, datanodeUuid=4433ab30-befb-4229-b7f1-d08e1c3b9e59, infoPort=38829, infoSecurePort=0, ipcPort=44361, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741841_1023 to 127.0.0.1:34889 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,401 WARN [ResponseProcessor for block BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027 java.io.IOException: Bad response ERROR for BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027 from datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,401 WARN [DataStreamer for file /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721301397 block BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK], DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 2024-12-09T05:15:05,402 WARN [PacketResponder: BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37249] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:05,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:55234 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027] {}] datanode.DataXceiver(331): 127.0.0.1:38239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55234 dst: /127.0.0.1:38239 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:41120 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1027] {}] datanode.DataXceiver(331): 127.0.0.1:37249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41120 dst: /127.0.0.1:37249 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3cfce79{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:05,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31d71545{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:05,405 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:05,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463114d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:05,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26b0ac62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:05,407 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:15:05,407 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:15:05,407 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:15:05,407 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836600810-172.17.0.2-1733721276277 (Datanode Uuid 4433ab30-befb-4229-b7f1-d08e1c3b9e59) service to localhost/127.0.0.1:45367 2024-12-09T05:15:05,408 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data5/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:05,408 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data6/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:05,409 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:15:05,411 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]] 2024-12-09T05:15:05,411 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]] 2024-12-09T05:15:05,411 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36179%2C1733721277192:(num 1733721301397) roll requested 2024-12-09T05:15:05,411 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.1733721305411 2024-12-09T05:15:05,415 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:05,415 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:15:05,415 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741846_1029 2024-12-09T05:15:05,416 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK] 2024-12-09T05:15:05,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36179 {}] regionserver.HRegion(8581): Flush requested on 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:15:05,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 688aa77f4712fd33e61f733d63bfbd0a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:15:05,417 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,417 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]) is bad. 2024-12-09T05:15:05,417 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741847_1030 2024-12-09T05:15:05,419 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK] 2024-12-09T05:15:05,423 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39683 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45366 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10]'}, localName='127.0.0.1:38239', datanodeUuid='8bc934f7-5434-4876-97e3-9b395251647a', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741848_1031 to mirror 127.0.0.1:39683 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,423 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK], DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]) is bad. 2024-12-09T05:15:05,423 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741848_1031 2024-12-09T05:15:05,423 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45366 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T05:15:05,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45366 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:38239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45366 dst: /127.0.0.1:38239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,423 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK] 2024-12-09T05:15:05,426 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45376 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10]'}, localName='127.0.0.1:38239', datanodeUuid='8bc934f7-5434-4876-97e3-9b395251647a', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741849_1032 to mirror 127.0.0.1:37249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,426 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,426 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45376 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T05:15:05,426 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK], DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 2024-12-09T05:15:05,427 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741849_1032 2024-12-09T05:15:05,426 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45376 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45376 dst: /127.0.0.1:38239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:05,428 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] 2024-12-09T05:15:05,429 WARN [IPC Server handler 3 on default port 45367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T05:15:05,429 WARN [IPC Server handler 3 on default port 45367 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T05:15:05,429 WARN [IPC Server handler 3 on default port 45367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T05:15:05,439 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721301397 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721305411 2024-12-09T05:15:05,439 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36507:36507)] 2024-12-09T05:15:05,439 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:15:05,439 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721301397 is not closed yet, will try archiving it next time 2024-12-09T05:15:05,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741845_1028 (size=14443) 2024-12-09T05:15:05,442 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:15:05,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/.tmp/info/22e138fe32a742bfb8d43edc035defb3 is 1080, key is row0002/info:/1733721301395/Put/seqid=0 2024-12-09T05:15:05,446 WARN [Thread-730 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,446 WARN [Thread-730 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:15:05,446 WARN [Thread-730 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741851_1034 2024-12-09T05:15:05,447 WARN [Thread-730 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK] 2024-12-09T05:15:05,448 WARN [Thread-730 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,449 WARN [Thread-730 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK], DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 
2024-12-09T05:15:05,449 WARN [Thread-730 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741852_1035 2024-12-09T05:15:05,449 WARN [Thread-730 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] 2024-12-09T05:15:05,459 WARN [Thread-730 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39683 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,460 WARN [Thread-730 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK], DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]) is bad. 2024-12-09T05:15:05,460 WARN [Thread-730 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741853_1036 2024-12-09T05:15:05,459 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45382 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10]'}, localName='127.0.0.1:38239', datanodeUuid='8bc934f7-5434-4876-97e3-9b395251647a', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741853_1036 to mirror 127.0.0.1:39683 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,460 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45382 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T05:15:05,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45382 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:38239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45382 dst: /127.0.0.1:38239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,460 WARN [Thread-730 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK] 2024-12-09T05:15:05,462 WARN [Thread-730 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,462 WARN [Thread-730 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]) is bad. 
2024-12-09T05:15:05,462 WARN [Thread-730 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741854_1037 2024-12-09T05:15:05,463 WARN [Thread-730 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK] 2024-12-09T05:15:05,463 WARN [IPC Server handler 1 on default port 45367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T05:15:05,464 WARN [IPC Server handler 1 on default port 45367 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T05:15:05,464 WARN [IPC Server handler 1 on default port 45367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T05:15:05,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741855_1038 (size=10347) 2024-12-09T05:15:05,633 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]] 2024-12-09T05:15:05,634 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]] 2024-12-09T05:15:05,634 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36179%2C1733721277192:(num 1733721305411) roll requested 2024-12-09T05:15:05,634 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.1733721305634 2024-12-09T05:15:05,638 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,638 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK], DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 2024-12-09T05:15:05,638 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741856_1039 2024-12-09T05:15:05,639 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] 2024-12-09T05:15:05,640 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,640 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]) is bad. 2024-12-09T05:15:05,641 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741857_1040 2024-12-09T05:15:05,641 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK] 2024-12-09T05:15:05,642 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,643 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]) is bad. 2024-12-09T05:15:05,643 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741858_1041 2024-12-09T05:15:05,644 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34889,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK] 2024-12-09T05:15:05,646 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:05,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45400 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10]'}, localName='127.0.0.1:38239', datanodeUuid='8bc934f7-5434-4876-97e3-9b395251647a', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741859_1042 to mirror 127.0.0.1:44725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:05,647 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK], DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]) is bad. 2024-12-09T05:15:05,647 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741859_1042 2024-12-09T05:15:05,647 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45400 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T05:15:05,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2089587403_22 at /127.0.0.1:45400 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:38239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45400 dst: /127.0.0.1:38239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:05,647 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK] 2024-12-09T05:15:05,648 WARN [IPC Server handler 2 on default port 45367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T05:15:05,648 WARN [IPC Server handler 2 on default port 45367 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T05:15:05,648 WARN [IPC Server handler 2 on default port 45367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T05:15:05,655 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721305411 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721305634 2024-12-09T05:15:05,655 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36507:36507)] 2024-12-09T05:15:05,655 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:15:05,655 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721305411 is not closed yet, will try archiving it next time 2024-12-09T05:15:05,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741850_1033 (size=1261) 2024-12-09T05:15:05,836 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-12-09T05:15:05,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/.tmp/info/22e138fe32a742bfb8d43edc035defb3 2024-12-09T05:15:05,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/.tmp/info/22e138fe32a742bfb8d43edc035defb3 as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/info/22e138fe32a742bfb8d43edc035defb3 2024-12-09T05:15:05,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/info/22e138fe32a742bfb8d43edc035defb3, entries=5, sequenceid=12, filesize=10.1 K 2024-12-09T05:15:05,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 688aa77f4712fd33e61f733d63bfbd0a in 470ms, sequenceid=12, compaction requested=false 2024-12-09T05:15:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 688aa77f4712fd33e61f733d63bfbd0a: 2024-12-09T05:15:06,048 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:06,054 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:15:06,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:15:06,055 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:15:06,055 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:15:06,055 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e2825a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:15:06,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1897f08a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:15:06,058 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721277601 is not closed yet, will try archiving it next time 2024-12-09T05:15:06,058 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721289270 to hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/oldWALs/41a709354867%2C36179%2C1733721277192.1733721289270 2024-12-09T05:15:06,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f5a0249{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/java.io.tmpdir/jetty-localhost-40121-hadoop-hdfs-3_4_1-tests_jar-_-any-7258992694429352385/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:06,199 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59948c0f{HTTP/1.1, (http/1.1)}{localhost:40121} 2024-12-09T05:15:06,199 INFO [Time-limited test {}] server.Server(415): Started @147753ms 2024-12-09T05:15:06,201 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:15:06,308 WARN [Thread-759 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:15:06,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc7016fdb0b5d316 with lease ID 0xd6c99a4bb2bde8e5: from storage DS-51f684b6-5442-4769-a5b7-81ca384ca30c node DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:06,319 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc7016fdb0b5d316 with lease ID 0xd6c99a4bb2bde8e5: from storage DS-7f4167d3-6738-47e8-b1b7-2244c6f43708 node DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:15:07,120 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T05:15:07,246 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@692f06e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38239, datanodeUuid=8bc934f7-5434-4876-97e3-9b395251647a, infoPort=36507, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741845_1028 to 127.0.0.1:44725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:07,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741855_1038 (size=10347) 2024-12-09T05:15:07,381 WARN [master/41a709354867:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:07,381 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36393%2C1733721277141:(num 1733721277298) roll requested 2024-12-09T05:15:07,382 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:07,382 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36393%2C1733721277141.1733721307382 2024-12-09T05:15:07,382 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:07,386 WARN [Thread-784 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:07,386 WARN [Thread-784 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK]) is bad. 2024-12-09T05:15:07,386 WARN [Thread-784 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741861_1044 2024-12-09T05:15:07,387 WARN [Thread-784 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44725,DS-9f8ca230-345a-47bf-bfd3-7d410a7fe291,DISK] 2024-12-09T05:15:07,391 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-09T05:15:07,391 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 with entries=93, filesize=46.04 KB; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721307382 2024-12-09T05:15:07,392 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36507:36507),(127.0.0.1/127.0.0.1:33343:33343)] 2024-12-09T05:15:07,392 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 is not closed yet, will try archiving it next time 2024-12-09T05:15:07,392 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:07,392 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:07,392 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 2024-12-09T05:15:07,393 WARN [IPC Server handler 3 on default port 45367 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 has not been closed. Lease recovery is in progress. RecoveryId = 1046 for block blk_1073741830_1006 2024-12-09T05:15:07,393 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 after 1ms 2024-12-09T05:15:08,246 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@8550ecc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38239, datanodeUuid=8bc934f7-5434-4876-97e3-9b395251647a, infoPort=36507, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741850_1033 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:08,436 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:15:08,437 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:15:11,394 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141/41a709354867%2C36393%2C1733721277141.1733721277298 after 4002ms 2024-12-09T05:15:11,588 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:15:11,590 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:15:16,334 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@60a13c80 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39683,null,null]) java.net.ConnectException: Call From 41a709354867/172.17.0.2 to localhost:33617 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T05:15:16,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741833_1022 (size=959) 2024-12-09T05:15:18,512 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T05:15:18,512 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T05:15:19,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6922a757[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741837_1013 to 127.0.0.1:44725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:19,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@510b1a00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741833_1022 to 127.0.0.1:44725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:20,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:15:20,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:15:22,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@510b1a00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741832_1008 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:22,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:15:23,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@510b1a00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741826_1002 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:23,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:15:23,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 688aa77f4712fd33e61f733d63bfbd0a, had cached 0 bytes from a total of 10347 2024-12-09T05:15:25,015 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.1733721325015 2024-12-09T05:15:25,025 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721305634 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721325015 2024-12-09T05:15:25,025 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33343:33343),(127.0.0.1/127.0.0.1:36507:36507)] 2024-12-09T05:15:25,025 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.1733721305634 is not closed yet, will try archiving it next time 2024-12-09T05:15:25,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741860_1043 (size=1618) 2024-12-09T05:15:25,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36179 {}] regionserver.HRegion(8581): Flush requested on 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:15:25,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 688aa77f4712fd33e61f733d63bfbd0a 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-09T05:15:25,029 INFO [sync.3 {}] wal.FSHLog(777): 
LowReplication-Roller was enabled. 2024-12-09T05:15:25,040 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T05:15:25,040 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T05:15:25,040 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x193d035d to 127.0.0.1:55669 2024-12-09T05:15:25,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:25,040 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:15:25,041 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1341200629, stopped=false 2024-12-09T05:15:25,041 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=41a709354867,36393,1733721277141 2024-12-09T05:15:25,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:15:25,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:15:25,043 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T05:15:25,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:25,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:25,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:25,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:15:25,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:25,043 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,36179,1733721277192' ***** 2024-12-09T05:15:25,044 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:15:25,044 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,41493,1733721278549' ***** 2024-12-09T05:15:25,044 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:15:25,044 INFO [RS:0;41a709354867:36179 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:15:25,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:15:25,044 INFO [RS:1;41a709354867:41493 {}] 
regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:15:25,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:15:25,044 INFO [RS:1;41a709354867:41493 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:15:25,044 INFO [RS:1;41a709354867:41493 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:15:25,044 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,41493,1733721278549 2024-12-09T05:15:25,044 DEBUG [RS:1;41a709354867:41493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:25,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:15:25,045 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,41493,1733721278549; all regions closed. 2024-12-09T05:15:25,045 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:15:25,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/.tmp/info/e4a5f5c9dac54a03a1608352cf6fa20f is 1080, key is row0007/info:/1733721305417/Put/seqid=0 2024-12-09T05:15:25,053 WARN [Thread-805 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:25,053 WARN [Thread-805 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741864_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK], DatanodeInfoWithStorage[127.0.0.1:42445,DS-51f684b6-5442-4769-a5b7-81ca384ca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 
2024-12-09T05:15:25,053 WARN [Thread-805 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741864_1048 2024-12-09T05:15:25,054 WARN [Thread-805 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] 2024-12-09T05:15:25,068 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,41493,1733721278549 2024-12-09T05:15:25,068 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:25,068 ERROR [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... 2024-12-09T05:15:25,068 DEBUG [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1665): Shutdown / close exception details: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:25,068 DEBUG [RS:1;41a709354867:41493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:25,068 INFO [RS:1;41a709354867:41493 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:15:25,069 INFO [RS:1;41a709354867:41493 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T05:15:25,069 INFO [RS:1;41a709354867:41493 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:15:25,069 INFO [RS:1;41a709354867:41493 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:15:25,069 INFO [RS:1;41a709354867:41493 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:15:25,069 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-09T05:15:25,069 INFO [RS:1;41a709354867:41493 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41493 2024-12-09T05:15:25,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,41493,1733721278549 2024-12-09T05:15:25,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:15:25,072 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,41493,1733721278549] 2024-12-09T05:15:25,072 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,41493,1733721278549; numProcessing=1 2024-12-09T05:15:25,074 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,41493,1733721278549 already deleted, retry=false 2024-12-09T05:15:25,074 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,41493,1733721278549 expired; onlineServers=1 2024-12-09T05:15:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741865_1049 (size=13583) 2024-12-09T05:15:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741865_1049 (size=13583) 2024-12-09T05:15:25,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/.tmp/info/e4a5f5c9dac54a03a1608352cf6fa20f 2024-12-09T05:15:25,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/.tmp/info/e4a5f5c9dac54a03a1608352cf6fa20f as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/info/e4a5f5c9dac54a03a1608352cf6fa20f 2024-12-09T05:15:25,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/info/e4a5f5c9dac54a03a1608352cf6fa20f, entries=8, sequenceid=25, filesize=13.3 K 2024-12-09T05:15:25,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 688aa77f4712fd33e61f733d63bfbd0a in 67ms, sequenceid=25, compaction requested=false 2024-12-09T05:15:25,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 688aa77f4712fd33e61f733d63bfbd0a: 2024-12-09T05:15:25,094 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K 2024-12-09T05:15:25,094 DEBUG [MemStoreFlusher.0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:15:25,094 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/default/TestLogRolling-testLogRollOnDatanodeDeath/688aa77f4712fd33e61f733d63bfbd0a/info/e4a5f5c9dac54a03a1608352cf6fa20f because midkey is the same as first or last row 2024-12-09T05:15:25,094 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:15:25,094 INFO [RS:0;41a709354867:36179 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:15:25,094 INFO [RS:0;41a709354867:36179 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(3579): Received CLOSE for b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(3579): Received CLOSE for 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,36179,1733721277192 2024-12-09T05:15:25,095 DEBUG [RS:0;41a709354867:36179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:25,095 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing b115246a15fd3345eb9dae2059e50f32, disabling compactions & flushes 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:15:25,095 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:15:25,095 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:15:25,095 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. after waiting 0 ms 2024-12-09T05:15:25,095 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 
2024-12-09T05:15:25,095 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing b115246a15fd3345eb9dae2059e50f32 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T05:15:25,095 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-09T05:15:25,096 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1603): Online Regions={b115246a15fd3345eb9dae2059e50f32=hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32., 688aa77f4712fd33e61f733d63bfbd0a=TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T05:15:25,096 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 688aa77f4712fd33e61f733d63bfbd0a, b115246a15fd3345eb9dae2059e50f32 2024-12-09T05:15:25,096 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:15:25,096 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:15:25,096 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:15:25,096 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:15:25,096 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:15:25,096 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB 2024-12-09T05:15:25,096 WARN [RS_OPEN_META-regionserver/41a709354867:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:25,097 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36179%2C1733721277192.meta:.meta(num 1733721277996) roll requested 2024-12-09T05:15:25,097 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:15:25,097 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36179%2C1733721277192.meta.1733721325097.meta 2024-12-09T05:15:25,097 ERROR [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server 41a709354867,36179,1733721277192: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:25,098 ERROR [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-09T05:15:25,100 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-09T05:15:25,102 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-09T05:15:25,102 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-09T05:15:25,102 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-09T05:15:25,102 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 270405120 }, "NonHeapMemoryUsage": { "committed": 162529280, "init": 7667712, "max": -1, "used": 160682048 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-09T05:15:25,106 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36393 {}] master.MasterRpcServices(626): 41a709354867,36179,1733721277192 reported a fatal error: ***** ABORTING region server 41a709354867,36179,1733721277192: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-09T05:15:25,106 WARN [regionserver/41a709354867:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-09T05:15:25,107 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721325097.meta 2024-12-09T05:15:25,108 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36507:36507),(127.0.0.1/127.0.0.1:33343:33343)] 2024-12-09T05:15:25,108 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta is not closed yet, will try archiving it next time 2024-12-09T05:15:25,108 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:25,108 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39683,DS-7fbf4c63-32af-429c-9350-fa24a258ab0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:25,108 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta 2024-12-09T05:15:25,109 WARN [IPC Server handler 1 on default port 45367 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta has not been closed. Lease recovery is in progress. RecoveryId = 1051 for block blk_1073741834_1010 2024-12-09T05:15:25,109 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta after 1ms 2024-12-09T05:15:25,114 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/.tmp/info/1f1152afcd2e4c978ec5bdea5edd964f is 45, key is default/info:d/1733721278465/Put/seqid=0 2024-12-09T05:15:25,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741867_1052 (size=5037) 2024-12-09T05:15:25,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741867_1052 (size=5037) 2024-12-09T05:15:25,120 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/.tmp/info/1f1152afcd2e4c978ec5bdea5edd964f 2024-12-09T05:15:25,128 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/.tmp/info/1f1152afcd2e4c978ec5bdea5edd964f as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/info/1f1152afcd2e4c978ec5bdea5edd964f 2024-12-09T05:15:25,134 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/info/1f1152afcd2e4c978ec5bdea5edd964f, entries=2, sequenceid=6, filesize=4.9 K 2024-12-09T05:15:25,135 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for b115246a15fd3345eb9dae2059e50f32 in 40ms, sequenceid=6, compaction requested=false 2024-12-09T05:15:25,140 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/data/hbase/namespace/b115246a15fd3345eb9dae2059e50f32/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T05:15:25,141 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for b115246a15fd3345eb9dae2059e50f32: 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733721278050.b115246a15fd3345eb9dae2059e50f32. 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 688aa77f4712fd33e61f733d63bfbd0a, disabling compactions & flushes 2024-12-09T05:15:25,141 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. after waiting 0 ms 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 688aa77f4712fd33e61f733d63bfbd0a: 2024-12-09T05:15:25,141 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:15:25,172 INFO [RS:1;41a709354867:41493 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,41493,1733721278549; zookeeper connection closed. 
2024-12-09T05:15:25,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41493-0x1007532fba90003, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:15:25,173 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4339d66e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4339d66e 2024-12-09T05:15:25,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-09T05:15:25,296 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(3579): Received CLOSE for 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:15:25,296 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:15:25,296 DEBUG [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 688aa77f4712fd33e61f733d63bfbd0a 2024-12-09T05:15:25,296 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 688aa77f4712fd33e61f733d63bfbd0a, disabling compactions & flushes 2024-12-09T05:15:25,296 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:15:25,296 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,296 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. after waiting 0 ms 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 
2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 688aa77f4712fd33e61f733d63bfbd0a: 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1733721278644.688aa77f4712fd33e61f733d63bfbd0a. 2024-12-09T05:15:25,297 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-09T05:15:25,463 INFO [regionserver/41a709354867:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:15:25,496 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-09T05:15:25,496 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,36179,1733721277192; all regions closed. 2024-12-09T05:15:25,497 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192 2024-12-09T05:15:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741866_1050 (size=93) 2024-12-09T05:15:25,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741866_1050 (size=93) 2024-12-09T05:15:25,502 INFO [regionserver/41a709354867:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T05:15:25,503 INFO [regionserver/41a709354867:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T05:15:26,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6922a757[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741829_1005 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:26,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@510b1a00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741827_1003 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:26,339 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1162df79 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-836600810-172.17.0.2-1733721276277:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:39683,null,null]) java.net.ConnectException: Call From 41a709354867/172.17.0.2 to localhost:33617 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T05:15:26,610 INFO [regionserver/41a709354867:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:15:27,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@510b1a00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741825_1001 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:27,312 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6922a757[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42445, datanodeUuid=c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38, infoPort=33343, infoSecurePort=0, ipcPort=41859, storageInfo=lv=-57;cid=testClusterID;nsid=177128257;c=1733721276277):Failed to transfer BP-836600810-172.17.0.2-1733721276277:blk_1073741838_1014 to 127.0.0.1:37249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:28,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741860_1043 (size=1618) 2024-12-09T05:15:28,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741830_1046 (size=47148) 2024-12-09T05:15:29,110 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta after 4002ms 2024-12-09T05:15:30,142 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,500 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T05:15:30,501 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192 2024-12-09T05:15:30,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741863_1047 (size=13280) 2024-12-09T05:15:30,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741863_1047 (size=13280) 2024-12-09T05:15:30,504 DEBUG [RS:0;41a709354867:36179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:30,504 INFO [RS:0;41a709354867:36179 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:15:30,504 INFO [RS:0;41a709354867:36179 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-09T05:15:30,504 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:15:30,505 INFO [RS:0;41a709354867:36179 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36179 2024-12-09T05:15:30,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:15:30,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,36179,1733721277192 2024-12-09T05:15:30,508 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,36179,1733721277192] 2024-12-09T05:15:30,508 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,36179,1733721277192; numProcessing=2 2024-12-09T05:15:30,510 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,36179,1733721277192 already deleted, retry=false 2024-12-09T05:15:30,510 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,36179,1733721277192 expired; onlineServers=0 2024-12-09T05:15:30,510 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,36393,1733721277141' ***** 2024-12-09T05:15:30,510 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:15:30,510 DEBUG [M:0;41a709354867:36393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b2d0891, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:15:30,510 INFO [M:0;41a709354867:36393 
{}] regionserver.HRegionServer(1224): stopping server 41a709354867,36393,1733721277141 2024-12-09T05:15:30,510 INFO [M:0;41a709354867:36393 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,36393,1733721277141; all regions closed. 2024-12-09T05:15:30,510 DEBUG [M:0;41a709354867:36393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:15:30,510 DEBUG [M:0;41a709354867:36393 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:15:30,510 DEBUG [M:0;41a709354867:36393 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:15:30,510 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-09T05:15:30,510 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721277382 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721277382,5,FailOnTimeoutGroup] 2024-12-09T05:15:30,510 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721277382 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721277382,5,FailOnTimeoutGroup] 2024-12-09T05:15:30,511 INFO [M:0;41a709354867:36393 {}] hbase.ChoreService(370): Chore service for: master/41a709354867:0 had [] on shutdown 2024-12-09T05:15:30,511 DEBUG [M:0;41a709354867:36393 {}] master.HMaster(1733): Stopping service threads 2024-12-09T05:15:30,511 INFO [M:0;41a709354867:36393 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:15:30,511 INFO [M:0;41a709354867:36393 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:15:30,511 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-09T05:15:30,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:15:30,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:30,512 DEBUG [M:0;41a709354867:36393 {}] zookeeper.ZKUtil(347): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:15:30,512 WARN [M:0;41a709354867:36393 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:15:30,512 INFO [M:0;41a709354867:36393 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T05:15:30,512 INFO [M:0;41a709354867:36393 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:15:30,512 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:15:30,512 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:15:30,512 INFO [M:0;41a709354867:36393 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:30,512 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:30,512 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:15:30,512 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T05:15:30,513 INFO [M:0;41a709354867:36393 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.08 KB heapSize=49.29 KB 2024-12-09T05:15:30,529 DEBUG [M:0;41a709354867:36393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3edb8923a12e48d2aee45a67b4896d0e is 82, key is hbase:meta,,1/info:regioninfo/1733721278024/Put/seqid=0 2024-12-09T05:15:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741868_1053 (size=5672) 2024-12-09T05:15:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741868_1053 (size=5672) 2024-12-09T05:15:30,535 INFO [M:0;41a709354867:36393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3edb8923a12e48d2aee45a67b4896d0e 2024-12-09T05:15:30,555 DEBUG [M:0;41a709354867:36393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de062ba47a8f4154b716db4ed353bfe7 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733721279024/Put/seqid=0 2024-12-09T05:15:30,556 WARN [Thread-838 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:30,556 WARN [Thread-838 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741869_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK], DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 
2024-12-09T05:15:30,557 WARN [Thread-838 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741869_1054 2024-12-09T05:15:30,557 WARN [Thread-838 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] 2024-12-09T05:15:30,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741870_1055 (size=7465) 2024-12-09T05:15:30,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741870_1055 (size=7465) 2024-12-09T05:15:30,562 INFO [M:0;41a709354867:36393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.41 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de062ba47a8f4154b716db4ed353bfe7 2024-12-09T05:15:30,581 DEBUG [M:0;41a709354867:36393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69280186c8ba435493220d7267010646 is 69, key is 41a709354867,36179,1733721277192/rs:state/1733721277441/Put/seqid=0 2024-12-09T05:15:30,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741871_1056 (size=5224) 2024-12-09T05:15:30,586 INFO [M:0;41a709354867:36393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69280186c8ba435493220d7267010646 2024-12-09T05:15:30,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741871_1056 (size=5224) 2024-12-09T05:15:30,605 DEBUG [M:0;41a709354867:36393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/73af597b5d644de2b9dc38a3a62b7381 is 52, key is load_balancer_on/state:d/1733721278524/Put/seqid=0 2024-12-09T05:15:30,607 WARN [Thread-850 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:30,607 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1510708481_22 at /127.0.0.1:44666 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741872_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10]'}, localName='127.0.0.1:38239', datanodeUuid='8bc934f7-5434-4876-97e3-9b395251647a', xmitsInProgress=0}:Exception transferring block BP-836600810-172.17.0.2-1733721276277:blk_1073741872_1057 to mirror 127.0.0.1:37249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:30,608 WARN [Thread-850 {}] hdfs.DataStreamer(1731): Error Recovery for BP-836600810-172.17.0.2-1733721276277:blk_1073741872_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38239,DS-3bb1b516-bb45-4ddd-be62-6728b7e76145,DISK], DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK]) is bad. 2024-12-09T05:15:30,608 WARN [Thread-850 {}] hdfs.DataStreamer(1850): Abandoning BP-836600810-172.17.0.2-1733721276277:blk_1073741872_1057 2024-12-09T05:15:30,608 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1510708481_22 at /127.0.0.1:44666 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741872_1057] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T05:15:30,608 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1510708481_22 at /127.0.0.1:44666 [Receiving block BP-836600810-172.17.0.2-1733721276277:blk_1073741872_1057] {}] datanode.DataXceiver(331): 127.0.0.1:38239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44666 dst: /127.0.0.1:38239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:30,608 WARN [Thread-850 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37249,DS-1013d44d-160d-4ef0-b20e-2357b4b1c3ef,DISK] 2024-12-09T05:15:30,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:15:30,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36179-0x1007532fba90001, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:15:30,609 INFO [RS:0;41a709354867:36179 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,36179,1733721277192; zookeeper connection closed. 2024-12-09T05:15:30,609 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ac511ac {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ac511ac 2024-12-09T05:15:30,609 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-09T05:15:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741873_1058 (size=5056) 2024-12-09T05:15:30,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741873_1058 (size=5056) 2024-12-09T05:15:30,618 INFO [M:0;41a709354867:36393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/73af597b5d644de2b9dc38a3a62b7381 2024-12-09T05:15:30,624 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3edb8923a12e48d2aee45a67b4896d0e as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3edb8923a12e48d2aee45a67b4896d0e 2024-12-09T05:15:30,629 INFO [M:0;41a709354867:36393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3edb8923a12e48d2aee45a67b4896d0e, entries=8, sequenceid=97, filesize=5.5 K 2024-12-09T05:15:30,630 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de062ba47a8f4154b716db4ed353bfe7 as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/de062ba47a8f4154b716db4ed353bfe7 2024-12-09T05:15:30,634 INFO [M:0;41a709354867:36393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/de062ba47a8f4154b716db4ed353bfe7, entries=11, sequenceid=97, filesize=7.3 K 2024-12-09T05:15:30,635 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69280186c8ba435493220d7267010646 as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/69280186c8ba435493220d7267010646 2024-12-09T05:15:30,640 INFO [M:0;41a709354867:36393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/69280186c8ba435493220d7267010646, entries=2, sequenceid=97, filesize=5.1 K 2024-12-09T05:15:30,641 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/73af597b5d644de2b9dc38a3a62b7381 as hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/73af597b5d644de2b9dc38a3a62b7381 2024-12-09T05:15:30,646 INFO [M:0;41a709354867:36393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/73af597b5d644de2b9dc38a3a62b7381, entries=1, sequenceid=97, filesize=4.9 K 2024-12-09T05:15:30,647 INFO [M:0;41a709354867:36393 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.08 KB/41039, heapSize ~49.23 KB/50408, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=97, compaction requested=false 2024-12-09T05:15:30,648 INFO [M:0;41a709354867:36393 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:30,649 DEBUG [M:0;41a709354867:36393 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:15:30,649 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/MasterData/WALs/41a709354867,36393,1733721277141 2024-12-09T05:15:30,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42445 is added to blk_1073741862_1045 (size=757) 2024-12-09T05:15:30,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38239 is added to blk_1073741862_1045 (size=757) 2024-12-09T05:15:30,652 INFO [M:0;41a709354867:36393 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-09T05:15:30,652 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:15:30,652 INFO [M:0;41a709354867:36393 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36393 2024-12-09T05:15:30,655 DEBUG [M:0;41a709354867:36393 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/41a709354867,36393,1733721277141 already deleted, retry=false 2024-12-09T05:15:30,666 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:15:30,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:30,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:15:30,757 INFO [M:0;41a709354867:36393 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,36393,1733721277141; zookeeper connection closed. 
2024-12-09T05:15:30,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36393-0x1007532fba90000, quorum=127.0.0.1:55669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:15:30,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f5a0249{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:30,760 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59948c0f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:30,760 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:30,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1897f08a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:30,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e2825a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:30,762 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:15:30,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T05:15:30,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:15:30,762 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836600810-172.17.0.2-1733721276277 (Datanode Uuid c7e19b78-5c7b-48e4-acaa-6b7bcee5cd38) service to localhost/127.0.0.1:45367 2024-12-09T05:15:30,762 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@8ec3cf {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39683,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33617 , LocalHost:localPort 41a709354867/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-09T05:15:30,762 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@8ec3cf {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-836600810-172.17.0.2-1733721276277:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42445,null,null], DatanodeInfoWithStorage[127.0.0.1:39683,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-836600810-172.17.0.2-1733721276277 2024-12-09T05:15:30,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data3/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:30,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data4/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:30,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:15:30,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77a342b4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:30,765 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ce13758{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:30,765 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:30,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@462ef7f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:30,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26ff292e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:30,767 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:15:30,767 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:15:30,767 WARN [BP-836600810-172.17.0.2-1733721276277 heartbeating to localhost/127.0.0.1:45367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836600810-172.17.0.2-1733721276277 (Datanode Uuid 8bc934f7-5434-4876-97e3-9b395251647a) service to localhost/127.0.0.1:45367 2024-12-09T05:15:30,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:15:30,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data9/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:30,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/cluster_70acbccb-fb0c-b9e4-c9ab-b55926edc780/dfs/data/data10/current/BP-836600810-172.17.0.2-1733721276277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:30,768 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:15:30,774 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39e90629{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:15:30,774 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e4a5ed5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:30,774 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:30,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c54ccd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:30,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@422490c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:30,783 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:15:30,811 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-09T05:15:30,819 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 64) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:45367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:45367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:45367 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (1179732672) connection to 
localhost/127.0.0.1:45367 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$792/0x00007fdbe0b63210.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:45367 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=428 (was 406) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=173 (was 167) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8200 (was 8774) 2024-12-09T05:15:30,825 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=428, MaxFileDescriptor=1048576, SystemLoadAverage=173, ProcessCount=11, AvailableMemoryMB=8199 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.log.dir so I do NOT create it in target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/10c1ac84-2042-6399-e31a-23c64b2c6c90/hadoop.tmp.dir so I do NOT create it in target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2, deleteOnExit=true 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/test.cache.data in system properties and HBase conf 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:15:30,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T05:15:30,826 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:15:30,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:15:30,840 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:15:30,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:30,913 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:15:30,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:15:30,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:15:30,914 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:15:30,915 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:30,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79df6632{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:15:30,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71387d7e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:15:31,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@442acd84{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-45803-hadoop-hdfs-3_4_1-tests_jar-_-any-3648218114485607969/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:15:31,033 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e97bdb5{HTTP/1.1, (http/1.1)}{localhost:45803} 2024-12-09T05:15:31,033 INFO [Time-limited test {}] server.Server(415): Started @172587ms 2024-12-09T05:15:31,046 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:15:31,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:31,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:31,117 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:15:31,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:15:31,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:15:31,119 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:15:31,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f4caf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:15:31,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f6f840c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:15:31,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e1ccefb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-37471-hadoop-hdfs-3_4_1-tests_jar-_-any-13188302356853181420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:31,238 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ffe2ba9{HTTP/1.1, (http/1.1)}{localhost:37471} 2024-12-09T05:15:31,238 INFO [Time-limited test {}] server.Server(415): Started @172792ms 2024-12-09T05:15:31,239 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:15:31,273 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:31,278 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:15:31,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:15:31,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:15:31,279 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:15:31,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d20f384{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:15:31,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b6118b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:15:31,330 WARN [Thread-932 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data1/current/BP-40306056-172.17.0.2-1733721330858/current, will proceed with Du for space computation calculation, 2024-12-09T05:15:31,330 WARN [Thread-933 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data2/current/BP-40306056-172.17.0.2-1733721330858/current, will proceed with Du for space computation calculation, 2024-12-09T05:15:31,353 WARN [Thread-911 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:15:31,355 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb44f121fff49769 with lease ID 0xc097025a327c3c27: Processing first storage report for DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047 from datanode DatanodeRegistration(127.0.0.1:44021, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=45047, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858) 2024-12-09T05:15:31,355 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb44f121fff49769 with lease ID 0xc097025a327c3c27: from storage DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047 node DatanodeRegistration(127.0.0.1:44021, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=45047, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb44f121fff49769 with lease ID 0xc097025a327c3c27: Processing first storage report for DS-76b68192-e430-4550-8b69-3c049d580d59 from datanode DatanodeRegistration(127.0.0.1:44021, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=45047, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858) 2024-12-09T05:15:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb44f121fff49769 with lease ID 0xc097025a327c3c27: from storage DS-76b68192-e430-4550-8b69-3c049d580d59 node DatanodeRegistration(127.0.0.1:44021, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=45047, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:31,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a37ea72{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-39891-hadoop-hdfs-3_4_1-tests_jar-_-any-6661504751504809301/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:31,399 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41261d7d{HTTP/1.1, (http/1.1)}{localhost:39891} 2024-12-09T05:15:31,399 INFO [Time-limited test {}] server.Server(415): Started @172953ms 2024-12-09T05:15:31,400 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
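
The entries above record HBaseTestingUtility bringing up a fresh mini cluster for regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart: one master, one region server, two datanodes and one ZooKeeper server, with the Hadoop/HBase directories redirected under target/test-data. As a hedged illustration only (not code captured from this log or from TestLogRolling itself), the sketch below shows roughly how a branch-2 test drives that API; it assumes the public HBaseTestingUtility/StartMiniClusterOption classes named in the log, and the class name MiniClusterSketch and the commented test body are placeholders.

    // Illustrative sketch only -- assumes the branch-2 HBaseTestingUtility /
    // StartMiniClusterOption APIs referenced in the log above; the class name
    // and test body are hypothetical, not taken from TestLogRolling.
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();

        // Mirrors the option string logged above:
        // numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        // Starts mini-DFS, mini-ZooKeeper, the master and the region server,
        // producing startup entries like the ones in this log section.
        util.startMiniCluster(option);
        try {
          // ... exercise WAL rolling against util.getConnection() here ...
        } finally {
          // Tears the cluster down; the ResourceChecker entries seen earlier
          // then compare thread/file-descriptor counts before and after.
          util.shutdownMiniCluster();
        }
      }
    }

Under these assumptions, the "before:" and "after:" ResourceChecker lines bracketing each test correspond to the counts taken around startMiniCluster/shutdownMiniCluster, which is why leaked netty event-loop and IPC client threads show up as the "Potentially hanging thread" dumps earlier in this section.
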
2024-12-09T05:15:31,493 WARN [Thread-959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data4/current/BP-40306056-172.17.0.2-1733721330858/current, will proceed with Du for space computation calculation, 2024-12-09T05:15:31,493 WARN [Thread-958 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data3/current/BP-40306056-172.17.0.2-1733721330858/current, will proceed with Du for space computation calculation, 2024-12-09T05:15:31,515 WARN [Thread-947 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:15:31,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab909f48d55513e8 with lease ID 0xc097025a327c3c28: Processing first storage report for DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb from datanode DatanodeRegistration(127.0.0.1:33101, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=33449, infoSecurePort=0, ipcPort=41649, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858) 2024-12-09T05:15:31,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab909f48d55513e8 with lease ID 0xc097025a327c3c28: from storage DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb node DatanodeRegistration(127.0.0.1:33101, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=33449, infoSecurePort=0, ipcPort=41649, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:31,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab909f48d55513e8 with lease ID 0xc097025a327c3c28: Processing first storage report for DS-c58ae600-4914-43f3-b90a-8ad6dec74458 from datanode DatanodeRegistration(127.0.0.1:33101, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=33449, infoSecurePort=0, ipcPort=41649, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858) 2024-12-09T05:15:31,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab909f48d55513e8 with lease ID 0xc097025a327c3c28: from storage DS-c58ae600-4914-43f3-b90a-8ad6dec74458 node DatanodeRegistration(127.0.0.1:33101, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=33449, infoSecurePort=0, ipcPort=41649, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:31,524 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615 2024-12-09T05:15:31,528 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/zookeeper_0, clientPort=63582, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:15:31,529 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63582 2024-12-09T05:15:31,529 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:15:31,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:15:31,542 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca with version=8 2024-12-09T05:15:31,542 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase-staging 2024-12-09T05:15:31,543 INFO [Time-limited test {}] client.ConnectionUtils(129): master/41a709354867:0 server-side Connection retries=45 2024-12-09T05:15:31,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:15:31,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:15:31,544 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:15:31,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:15:31,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:15:31,544 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:15:31,544 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:15:31,545 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36903 2024-12-09T05:15:31,545 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,549 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36903 connecting to ZooKeeper ensemble=127.0.0.1:63582 2024-12-09T05:15:31,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:369030x0, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:15:31,557 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36903-0x1007533d02c0000 connected 2024-12-09T05:15:31,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:15:31,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:15:31,582 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:15:31,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36903 2024-12-09T05:15:31,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36903 2024-12-09T05:15:31,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36903 2024-12-09T05:15:31,587 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36903 2024-12-09T05:15:31,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36903 2024-12-09T05:15:31,588 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca, hbase.cluster.distributed=false 2024-12-09T05:15:31,604 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:15:31,604 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:15:31,605 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41341 2024-12-09T05:15:31,605 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:15:31,607 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:15:31,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,612 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41341 connecting to ZooKeeper ensemble=127.0.0.1:63582 2024-12-09T05:15:31,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413410x0, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:15:31,617 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41341-0x1007533d02c0001 connected 2024-12-09T05:15:31,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:15:31,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:15:31,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:15:31,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41341 2024-12-09T05:15:31,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41341 2024-12-09T05:15:31,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41341 2024-12-09T05:15:31,625 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41341 2024-12-09T05:15:31,626 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41341 2024-12-09T05:15:31,627 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/41a709354867,36903,1733721331543 2024-12-09T05:15:31,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:15:31,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:15:31,630 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/41a709354867,36903,1733721331543 2024-12-09T05:15:31,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:15:31,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:15:31,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,633 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:15:31,633 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/41a709354867,36903,1733721331543 from backup master directory 2024-12-09T05:15:31,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:15:31,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/41a709354867,36903,1733721331543 2024-12-09T05:15:31,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:15:31,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:15:31,635 WARN [master/41a709354867:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:15:31,635 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=41a709354867,36903,1733721331543 2024-12-09T05:15:31,640 DEBUG [M:0;41a709354867:36903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;41a709354867:36903 2024-12-09T05:15:31,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:15:31,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:15:31,651 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/hbase.id with ID: 64cd297d-3222-41fb-816a-e76ef7f3460f 2024-12-09T05:15:31,662 INFO [master/41a709354867:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:31,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:15:31,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:15:31,678 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:15:31,679 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:15:31,680 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:15:31,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:15:31,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:15:31,690 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store 2024-12-09T05:15:31,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:15:31,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:15:31,698 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:31,698 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:15:31,698 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:31,698 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:31,698 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:15:31,698 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:31,698 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:15:31,698 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:15:31,699 WARN [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/.initializing 2024-12-09T05:15:31,699 DEBUG [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543 2024-12-09T05:15:31,702 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C36903%2C1733721331543, suffix=, logDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543, archiveDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/oldWALs, maxLogs=10 2024-12-09T05:15:31,703 INFO [master/41a709354867:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36903%2C1733721331543.1733721331703 2024-12-09T05:15:31,709 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 2024-12-09T05:15:31,709 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33449:33449),(127.0.0.1/127.0.0.1:45047:45047)] 2024-12-09T05:15:31,709 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:15:31,710 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:31,710 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,710 DEBUG [master/41a709354867:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:15:31,713 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:15:31,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:15:31,715 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:15:31,715 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:15:31,717 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:15:31,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:15:31,718 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:15:31,720 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,720 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,722 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:15:31,723 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:15:31,725 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:15:31,726 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728549, jitterRate=-0.07360251247882843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:15:31,726 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:15:31,728 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:15:31,731 DEBUG [master/41a709354867:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@546422ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:15:31,732 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-09T05:15:31,732 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:15:31,733 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:15:31,733 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
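The FlushLargeStoresPolicy entry above falls back to memstore-flush-size divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting that property on a table descriptor, assuming a hypothetical table name and an illustrative 16 MB value (not taken from this run):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Hypothetical table and family; the 16 MB lower bound is only an example.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(
            td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }

With the property present in the descriptor, the policy would use it directly instead of the per-family fallback reported in the log.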
2024-12-09T05:15:31,733 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T05:15:31,733 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-09T05:15:31,733 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:15:31,736 INFO [master/41a709354867:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T05:15:31,736 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:15:31,738 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:15:31,738 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:15:31,739 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:15:31,740 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:15:31,740 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:15:31,741 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:15:31,742 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:15:31,743 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:15:31,745 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:15:31,746 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:15:31,747 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:15:31,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-09T05:15:31,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:15:31,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,749 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=41a709354867,36903,1733721331543, sessionid=0x1007533d02c0000, setting cluster-up flag (Was=false) 2024-12-09T05:15:31,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,760 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:15:31,761 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,36903,1733721331543 2024-12-09T05:15:31,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:31,770 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:15:31,771 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,36903,1733721331543 2024-12-09T05:15:31,773 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T05:15:31,774 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T05:15:31,774 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 41a709354867,36903,1733721331543 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/41a709354867:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:15:31,774 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,775 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733721361775 2024-12-09T05:15:31,775 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:15:31,776 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:15:31,776 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:15:31,776 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:15:31,776 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:15:31,776 INFO 
[master/41a709354867:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:15:31,776 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,776 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:15:31,776 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:15:31,777 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721331777,5,FailOnTimeoutGroup] 2024-12-09T05:15:31,777 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721331777,5,FailOnTimeoutGroup] 2024-12-09T05:15:31,777 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:15:31,777 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,778 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
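The HMaster(1680) entry above states the switch directly: reopening regions with a very high store file reference count stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A small sketch with an arbitrary illustrative threshold:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the recovery chore; 3 is only an example.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(
            conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }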
2024-12-09T05:15:31,777 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:15:31,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:15:31,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:15:31,788 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T05:15:31,789 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca 2024-12-09T05:15:31,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741832_1008 (size=32) 
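The hbase:meta descriptor printed above lists the 'info' family with VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and an 8 KB block size. A sketch of building an equivalent column family descriptor with the public client API, for illustration only (this is not how the master constructs the meta descriptor internally):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static void main(String[] args) {
        // Mirrors the attributes printed for the 'info' family above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        System.out.println(info);
      }
    }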
2024-12-09T05:15:31,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:15:31,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:31,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:15:31,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:15:31,798 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:15:31,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:15:31,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:15:31,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T05:15:31,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:15:31,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:15:31,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:31,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:15:31,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740 2024-12-09T05:15:31,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740 2024-12-09T05:15:31,805 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T05:15:31,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:15:31,809 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:15:31,809 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873634, jitterRate=0.11088348925113678}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:15:31,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:15:31,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:15:31,809 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:15:31,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:15:31,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:15:31,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:15:31,810 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:15:31,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:15:31,811 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:15:31,811 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T05:15:31,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:15:31,812 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:15:31,813 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:15:31,838 DEBUG [RS:0;41a709354867:41341 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;41a709354867:41341 2024-12-09T05:15:31,840 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1008): ClusterId : 64cd297d-3222-41fb-816a-e76ef7f3460f 2024-12-09T05:15:31,840 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:15:31,842 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:15:31,842 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:15:31,844 DEBUG 
[RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:15:31,845 DEBUG [RS:0;41a709354867:41341 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d200534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:15:31,846 DEBUG [RS:0;41a709354867:41341 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67228898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:15:31,846 INFO [RS:0;41a709354867:41341 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:15:31,846 INFO [RS:0;41a709354867:41341 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T05:15:31,846 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-09T05:15:31,846 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,36903,1733721331543 with isa=41a709354867/172.17.0.2:41341, startcode=1733721331603 2024-12-09T05:15:31,846 DEBUG [RS:0;41a709354867:41341 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:15:31,849 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43729, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:15:31,849 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36903 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,41341,1733721331603 2024-12-09T05:15:31,849 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36903 {}] master.ServerManager(486): Registering regionserver=41a709354867,41341,1733721331603 2024-12-09T05:15:31,851 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca 2024-12-09T05:15:31,851 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44921 2024-12-09T05:15:31,851 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:15:31,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:15:31,854 DEBUG [RS:0;41a709354867:41341 {}] zookeeper.ZKUtil(111): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,41341,1733721331603 2024-12-09T05:15:31,854 WARN [RS:0;41a709354867:41341 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
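The registration above sets a watcher on an ephemeral child of /hbase/rs for the new region server. A sketch of listing those children with a plain ZooKeeper client, reusing the quorum address printed in the log; connection and error handling are simplified, and the session timeout is arbitrary:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZnodeSketch {
      public static void main(String[] args) throws Exception {
        // 127.0.0.1:63582 is the quorum from the log; /hbase/rs holds one
        // ephemeral child per live region server.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63582", 30000, event -> { });
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println);
        zk.close();
      }
    }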
2024-12-09T05:15:31,854 INFO [RS:0;41a709354867:41341 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:15:31,854 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603 2024-12-09T05:15:31,854 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,41341,1733721331603] 2024-12-09T05:15:31,857 DEBUG [RS:0;41a709354867:41341 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:15:31,857 INFO [RS:0;41a709354867:41341 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:15:31,859 INFO [RS:0;41a709354867:41341 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:15:31,859 INFO [RS:0;41a709354867:41341 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:15:31,859 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,860 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:15:31,860 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
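WALFactory(183) above instantiates FSHLogProvider as the WAL provider. Provider selection is normally driven by the hbase.wal.provider setting; a minimal sketch, assuming a default HBase configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects FSHLogProvider, the provider named in the log above;
        // "asyncfs" is the other common choice.
        conf.set("hbase.wal.provider", "filesystem");
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }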
2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:15:31,861 DEBUG [RS:0;41a709354867:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:15:31,861 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,862 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,862 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,862 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,862 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,41341,1733721331603-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T05:15:31,876 INFO [RS:0;41a709354867:41341 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:15:31,877 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,41341,1733721331603-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:31,891 INFO [RS:0;41a709354867:41341 {}] regionserver.Replication(204): 41a709354867,41341,1733721331603 started 2024-12-09T05:15:31,891 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,41341,1733721331603, RpcServer on 41a709354867/172.17.0.2:41341, sessionid=0x1007533d02c0001 2024-12-09T05:15:31,891 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:15:31,891 DEBUG [RS:0;41a709354867:41341 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,41341,1733721331603 2024-12-09T05:15:31,891 DEBUG [RS:0;41a709354867:41341 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,41341,1733721331603' 2024-12-09T05:15:31,891 DEBUG [RS:0;41a709354867:41341 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:15:31,891 DEBUG [RS:0;41a709354867:41341 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:15:31,892 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:15:31,892 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:15:31,892 DEBUG [RS:0;41a709354867:41341 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,41341,1733721331603 2024-12-09T05:15:31,892 DEBUG [RS:0;41a709354867:41341 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,41341,1733721331603' 2024-12-09T05:15:31,892 DEBUG [RS:0;41a709354867:41341 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:15:31,892 DEBUG [RS:0;41a709354867:41341 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:15:31,893 DEBUG [RS:0;41a709354867:41341 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:15:31,893 INFO [RS:0;41a709354867:41341 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:15:31,893 INFO [RS:0;41a709354867:41341 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:15:31,964 WARN [41a709354867:36903 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-09T05:15:31,995 INFO [RS:0;41a709354867:41341 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C41341%2C1733721331603, suffix=, logDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603, archiveDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/oldWALs, maxLogs=32 2024-12-09T05:15:31,996 INFO [RS:0;41a709354867:41341 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:15:32,002 INFO [RS:0;41a709354867:41341 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:15:32,002 DEBUG [RS:0;41a709354867:41341 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45047:45047),(127.0.0.1/127.0.0.1:33449:33449)] 2024-12-09T05:15:32,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:15:32,214 DEBUG [41a709354867:36903 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T05:15:32,214 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=41a709354867,41341,1733721331603 2024-12-09T05:15:32,216 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,41341,1733721331603, state=OPENING 2024-12-09T05:15:32,220 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:15:32,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:32,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:32,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=41a709354867,41341,1733721331603}] 2024-12-09T05:15:32,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:15:32,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:15:32,375 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,41341,1733721331603 2024-12-09T05:15:32,375 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:15:32,377 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:15:32,381 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T05:15:32,381 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:15:32,383 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C41341%2C1733721331603.meta, suffix=.meta, logDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603, archiveDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/oldWALs, maxLogs=32 2024-12-09T05:15:32,384 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta 2024-12-09T05:15:32,393 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33449:33449),(127.0.0.1/127.0.0.1:45047:45047)] 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:15:32,394 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T05:15:32,394 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T05:15:32,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:15:32,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:15:32,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T05:15:32,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:15:32,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:15:32,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:15:32,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:32,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:15:32,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:15:32,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:15:32,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:32,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:15:32,401 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740 2024-12-09T05:15:32,402 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740 2024-12-09T05:15:32,403 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:15:32,405 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:15:32,405 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713638, jitterRate=-0.09256279468536377}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:15:32,406 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:15:32,406 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733721332375 2024-12-09T05:15:32,408 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:15:32,408 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T05:15:32,409 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,41341,1733721331603 2024-12-09T05:15:32,410 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,41341,1733721331603, state=OPEN 2024-12-09T05:15:32,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:15:32,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:15:32,413 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:15:32,413 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:15:32,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:15:32,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=41a709354867,41341,1733721331603 in 191 msec 2024-12-09T05:15:32,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:15:32,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-12-09T05:15:32,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 647 msec 2024-12-09T05:15:32,420 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733721332420, completionTime=-1 2024-12-09T05:15:32,420 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T05:15:32,420 DEBUG [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T05:15:32,421 DEBUG [hconnection-0x6dbcf8ef-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:15:32,422 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51534, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:15:32,423 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T05:15:32,423 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733721392423 2024-12-09T05:15:32,423 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733721452423 2024-12-09T05:15:32,423 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-09T05:15:32,430 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36903,1733721331543-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:32,430 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36903,1733721331543-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:32,430 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36903,1733721331543-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:32,430 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-41a709354867:36903, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:32,430 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:15:32,431 INFO [master/41a709354867:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-09T05:15:32,431 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:15:32,432 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T05:15:32,432 DEBUG [master/41a709354867:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T05:15:32,433 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:15:32,433 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:32,434 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:15:32,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:15:32,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:15:32,446 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3981d7db47fabc187c8597d8f3f9ada3, NAME => 'hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca 2024-12-09T05:15:32,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:15:32,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:15:32,453 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:32,453 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3981d7db47fabc187c8597d8f3f9ada3, disabling compactions & flushes 2024-12-09T05:15:32,453 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:15:32,453 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:15:32,453 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. after waiting 0 ms 2024-12-09T05:15:32,453 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:15:32,453 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:15:32,453 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3981d7db47fabc187c8597d8f3f9ada3: 2024-12-09T05:15:32,455 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:15:32,455 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733721332455"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721332455"}]},"ts":"1733721332455"} 2024-12-09T05:15:32,457 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T05:15:32,458 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:15:32,458 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721332458"}]},"ts":"1733721332458"} 2024-12-09T05:15:32,460 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T05:15:32,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3981d7db47fabc187c8597d8f3f9ada3, ASSIGN}] 2024-12-09T05:15:32,464 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3981d7db47fabc187c8597d8f3f9ada3, ASSIGN 2024-12-09T05:15:32,465 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3981d7db47fabc187c8597d8f3f9ada3, ASSIGN; state=OFFLINE, location=41a709354867,41341,1733721331603; forceNewPlan=false, retain=false 2024-12-09T05:15:32,616 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3981d7db47fabc187c8597d8f3f9ada3, regionState=OPENING, regionLocation=41a709354867,41341,1733721331603 2024-12-09T05:15:32,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 3981d7db47fabc187c8597d8f3f9ada3, server=41a709354867,41341,1733721331603}] 2024-12-09T05:15:32,770 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,41341,1733721331603 2024-12-09T05:15:32,774 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:15:32,774 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3981d7db47fabc187c8597d8f3f9ada3, NAME => 'hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:15:32,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:32,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,776 INFO [StoreOpener-3981d7db47fabc187c8597d8f3f9ada3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,778 INFO [StoreOpener-3981d7db47fabc187c8597d8f3f9ada3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3981d7db47fabc187c8597d8f3f9ada3 columnFamilyName info 2024-12-09T05:15:32,778 DEBUG [StoreOpener-3981d7db47fabc187c8597d8f3f9ada3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:32,778 INFO [StoreOpener-3981d7db47fabc187c8597d8f3f9ada3-1 {}] regionserver.HStore(327): Store=3981d7db47fabc187c8597d8f3f9ada3/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:15:32,779 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/namespace/3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,779 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/namespace/3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,781 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:15:32,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/namespace/3981d7db47fabc187c8597d8f3f9ada3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:15:32,784 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3981d7db47fabc187c8597d8f3f9ada3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856585, jitterRate=0.08920460939407349}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:15:32,784 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3981d7db47fabc187c8597d8f3f9ada3: 2024-12-09T05:15:32,785 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3., pid=6, masterSystemTime=1733721332770 2024-12-09T05:15:32,787 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:15:32,787 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 
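Editor's note: the CreateTableProcedure entries above show the master building hbase:namespace with a single 'info' family (VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)') and then assigning and opening region 3981d7db47fabc187c8597d8f3f9ada3. For reference, the same descriptor expressed with the public client API looks like the sketch below; this is illustrative only, since the master creates this system table itself during initialization.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class NamespaceDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the descriptor printed by HMaster(2425) above for hbase:namespace;
        // illustrative only -- the master creates this table itself during startup.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("hbase", "namespace"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(10)   // VERSIONS => '10'
                .setInMemory(true)    // IN_MEMORY => 'true'
                .setBlocksize(8192)   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
        System.out.println(td);
      }
    }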
2024-12-09T05:15:32,788 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3981d7db47fabc187c8597d8f3f9ada3, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,41341,1733721331603 2024-12-09T05:15:32,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:15:32,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3981d7db47fabc187c8597d8f3f9ada3, server=41a709354867,41341,1733721331603 in 172 msec 2024-12-09T05:15:32,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:15:32,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3981d7db47fabc187c8597d8f3f9ada3, ASSIGN in 329 msec 2024-12-09T05:15:32,795 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:15:32,796 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721332796"}]},"ts":"1733721332796"} 2024-12-09T05:15:32,797 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T05:15:32,801 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:15:32,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 370 msec 2024-12-09T05:15:32,833 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T05:15:32,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:15:32,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:32,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:15:32,839 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T05:15:32,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:15:32,850 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 11 msec 2024-12-09T05:15:32,861 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T05:15:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:15:32,872 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 11 msec 2024-12-09T05:15:32,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T05:15:32,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T05:15:32,888 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.253sec 2024-12-09T05:15:32,888 INFO [master/41a709354867:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:15:32,888 INFO [master/41a709354867:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:15:32,888 INFO [master/41a709354867:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:15:32,888 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:15:32,888 INFO [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:15:32,889 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36903,1733721331543-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:15:32,889 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36903,1733721331543-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:15:32,890 DEBUG [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:15:32,890 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:15:32,891 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36903,1733721331543-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
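Editor's note: the two CreateNamespaceProcedure entries above create the built-in 'default' and 'hbase' namespaces as part of master startup. A sketch of the equivalent operations through the public Admin API is shown below; user code would normally create only its own namespaces (the namespace name used here is hypothetical).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceAdminSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Create a user namespace (the 'default' and 'hbase' ones above are built in).
          admin.createNamespace(NamespaceDescriptor.create("logrolling_demo").build());
          // List what the master now knows about.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }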
2024-12-09T05:15:32,929 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d7738ef to 127.0.0.1:63582 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21fe665f 2024-12-09T05:15:32,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d4e14f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:15:32,934 DEBUG [hconnection-0x1fd04f9a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:15:32,935 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52816, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:15:32,937 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=41a709354867,36903,1733721331543 2024-12-09T05:15:32,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:15:32,941 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-09T05:15:32,941 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart 2024-12-09T05:15:32,941 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2 2024-12-09T05:15:32,942 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:15:32,945 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58128, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:15:32,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T05:15:32,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
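Editor's note: the TableDescriptorChecker(321) warnings above show the test running with deliberately tiny limits ("hbase.hregion.max.filesize" at 786432 bytes and "hbase.hregion.memstore.flush.size" at 8192 bytes), which forces frequent flushes and rolls, and the master also reports balanceSwitch=false before testLogRollOnPipelineRestart starts. The actual setup code of TestLogRolling is not part of this log, so the following is only an assumption about the usual pattern for arranging such a test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Admin;

    public class TestLogRollingSetupSketch {
      // Matches the values flagged by TableDescriptorChecker(321) above.
      static void tuneForLogRolling(Configuration conf) {
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB, forces early splits
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB, forces frequent flushes
      }

      // Corresponds to the "set balanceSwitch=false" entry above so regions stay put
      // while the test restarts the HDFS pipeline.
      static void disableBalancer(Admin admin) throws Exception {
        admin.balancerSwitch(false, true);
      }
    }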
2024-12-09T05:15:32,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:15:32,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T05:15:32,948 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:15:32,949 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:32,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-09T05:15:32,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:15:32,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:15:32,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741837_1013 (size=395) 2024-12-09T05:15:32,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741837_1013 (size=395) 2024-12-09T05:15:32,959 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7602233e1695cb4fa966e7403d7d0887, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca 2024-12-09T05:15:32,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741838_1014 (size=78) 2024-12-09T05:15:32,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44021 is added to blk_1073741838_1014 (size=78) 2024-12-09T05:15:32,967 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:32,967 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing 7602233e1695cb4fa966e7403d7d0887, disabling compactions & flushes 2024-12-09T05:15:32,967 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:32,967 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:32,967 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. after waiting 0 ms 2024-12-09T05:15:32,967 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:32,967 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:32,967 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7602233e1695cb4fa966e7403d7d0887: 2024-12-09T05:15:32,968 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:15:32,969 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733721332968"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721332968"}]},"ts":"1733721332968"} 2024-12-09T05:15:32,971 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
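Editor's note: the MetaTableAccessor(2113) puts above record the new region's info:regioninfo and info:state cells (and a table:state cell for the table row) in hbase:meta. A small sketch of reading the region row back with the client API, using the family and qualifier names taken from the Put JSON above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Region rows in hbase:meta start with the table name, so a prefix scan
          // picks up the row written by the CreateTableProcedure above.
          Scan scan = new Scan().setRowPrefixFilter(
              Bytes.toBytes("TestLogRolling-testLogRollOnPipelineRestart,"));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result r : scanner) {
              byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
              System.out.println(Bytes.toString(r.getRow()) + " state="
                  + (state == null ? "?" : Bytes.toString(state)));
            }
          }
        }
      }
    }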
2024-12-09T05:15:32,972 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:15:32,972 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721332972"}]},"ts":"1733721332972"} 2024-12-09T05:15:32,973 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-09T05:15:32,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7602233e1695cb4fa966e7403d7d0887, ASSIGN}] 2024-12-09T05:15:32,978 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7602233e1695cb4fa966e7403d7d0887, ASSIGN 2024-12-09T05:15:32,979 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7602233e1695cb4fa966e7403d7d0887, ASSIGN; state=OFFLINE, location=41a709354867,41341,1733721331603; forceNewPlan=false, retain=false 2024-12-09T05:15:33,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:33,130 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=7602233e1695cb4fa966e7403d7d0887, regionState=OPENING, regionLocation=41a709354867,41341,1733721331603 2024-12-09T05:15:33,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 7602233e1695cb4fa966e7403d7d0887, server=41a709354867,41341,1733721331603}] 2024-12-09T05:15:33,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 41a709354867,41341,1733721331603 2024-12-09T05:15:33,290 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:33,290 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 7602233e1695cb4fa966e7403d7d0887, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:15:33,290 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,291 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:15:33,291 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,291 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,292 INFO [StoreOpener-7602233e1695cb4fa966e7403d7d0887-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,294 INFO [StoreOpener-7602233e1695cb4fa966e7403d7d0887-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7602233e1695cb4fa966e7403d7d0887 columnFamilyName info 2024-12-09T05:15:33,294 DEBUG [StoreOpener-7602233e1695cb4fa966e7403d7d0887-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:15:33,294 INFO [StoreOpener-7602233e1695cb4fa966e7403d7d0887-1 {}] regionserver.HStore(327): Store=7602233e1695cb4fa966e7403d7d0887/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:15:33,295 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,295 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,298 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:15:33,300 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/7602233e1695cb4fa966e7403d7d0887/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:15:33,300 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 7602233e1695cb4fa966e7403d7d0887; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758418, jitterRate=-0.035622239112854004}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:15:33,301 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 7602233e1695cb4fa966e7403d7d0887: 2024-12-09T05:15:33,302 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887., pid=11, masterSystemTime=1733721333284 2024-12-09T05:15:33,304 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:33,304 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 
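Editor's note: the repeated RecoverLeaseFSUtils(258) warnings in this stretch come from a Close-WAL-Writer thread still trying to recover the lease on a WAL from an earlier cluster (note the hdfs://localhost:45367/.../47340205-... path, whereas the current cluster writes under localhost:44921/.../58349bcf-...); the underlying DFSClient has since been closed, hence the "Filesystem closed" cause, and the utility keeps polling isFileClosed via reflection roughly once a second. A simplified sketch of that recover-then-poll pattern with the public HDFS API follows; it is an illustration of the pattern, not the actual RecoverLeaseFSUtils code, and the path used is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease, then poll until the file is closed.
      static void recoverLease(DistributedFileSystem dfs, Path walFile) throws Exception {
        boolean recovered = dfs.recoverLease(walFile);
        while (!recovered && !dfs.isFileClosed(walFile)) {
          Thread.sleep(1000L);                // roughly the ~1s retry cadence seen above
          recovered = dfs.recoverLease(walFile);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WAL path; the real one in the log belongs to an earlier test run.
        Path wal = new Path("hdfs://localhost:45367/user/jenkins/old-wal-example");
        try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
          recoverLease((DistributedFileSystem) fs, wal);
        }
      }
    }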
2024-12-09T05:15:33,305 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=7602233e1695cb4fa966e7403d7d0887, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,41341,1733721331603 2024-12-09T05:15:33,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T05:15:33,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 7602233e1695cb4fa966e7403d7d0887, server=41a709354867,41341,1733721331603 in 175 msec 2024-12-09T05:15:33,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T05:15:33,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7602233e1695cb4fa966e7403d7d0887, ASSIGN in 332 msec 2024-12-09T05:15:33,312 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:15:33,312 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721333312"}]},"ts":"1733721333312"} 2024-12-09T05:15:33,313 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-09T05:15:33,316 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:15:33,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 370 msec 2024-12-09T05:15:34,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:35,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:15:35,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-09T05:15:35,188 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-09T05:15:35,189 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T05:15:35,189 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-09T05:15:36,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:37,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:37,897 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:15:37,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:15:37,941 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-09T05:15:37,941 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-09T05:15:38,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:39,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:40,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:15:41,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:42,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:42,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36903 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:15:42,951 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed 2024-12-09T05:15:42,954 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T05:15:42,954 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:15:43,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:44,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:44,960 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:15:44,960 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:15:44,961 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:44,961 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:44,961 WARN [DataStreamer for file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 block BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK], DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]) is bad. 2024-12-09T05:15:44,961 WARN [DataStreamer for file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta block BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK], DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]) is bad. 2024-12-09T05:15:44,961 WARN [DataStreamer for file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 block BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK], DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33101,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]) is bad. 
2024-12-09T05:15:44,961 WARN [PacketResponder: BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33101] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-752169608_22 at /127.0.0.1:35696 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35696 dst: /127.0.0.1:33101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-752169608_22 at /127.0.0.1:46244 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46244 dst: /127.0.0.1:44021 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:46292 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46292 dst: /127.0.0.1:44021 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:46280 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46280 dst: /127.0.0.1:44021 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:35714 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35714 dst: /127.0.0.1:33101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:35716 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35716 dst: /127.0.0.1:33101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:44,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a37ea72{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:44,965 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41261d7d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:44,965 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:44,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b6118b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:44,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d20f384{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:44,967 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:15:44,967 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T05:15:44,967 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-40306056-172.17.0.2-1733721330858 (Datanode Uuid 671485d7-3754-4431-9e32-a43a25d06301) service to localhost/127.0.0.1:44921 2024-12-09T05:15:44,967 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:15:44,967 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data3/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:44,968 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data4/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:44,968 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:15:44,978 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:44,983 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:15:44,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:15:44,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:15:44,983 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:15:44,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f6781d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:15:44,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dcdba76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:15:45,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@268b2a87{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-35609-hadoop-hdfs-3_4_1-tests_jar-_-any-3348344626706475405/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:45,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63bd399f{HTTP/1.1, (http/1.1)}{localhost:35609} 2024-12-09T05:15:45,110 INFO [Time-limited test {}] server.Server(415): Started @186664ms 2024-12-09T05:15:45,112 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:15:45,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:45,128 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:45,128 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:45,128 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:45,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:34622 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34622 dst: /127.0.0.1:44021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:45,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:34620 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34620 dst: /127.0.0.1:44021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:15:45,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-752169608_22 at /127.0.0.1:34630 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34630 dst: /127.0.0.1:44021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:15:45,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e1ccefb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:45,131 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ffe2ba9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:15:45,132 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:15:45,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f6f840c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:15:45,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f4caf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:15:45,133 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:15:45,133 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T05:15:45,133 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-40306056-172.17.0.2-1733721330858 (Datanode Uuid c69f82dd-8719-4a3a-8171-c3c34f68b7c2) service to localhost/127.0.0.1:44921 2024-12-09T05:15:45,133 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:15:45,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data1/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:45,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data2/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:15:45,134 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:15:45,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:15:45,150 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:15:45,151 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:15:45,151 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:15:45,151 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:15:45,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d3186f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:15:45,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d7853cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:15:45,202 WARN [Thread-1093 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:15:45,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6935fbbc8097903 with lease ID 0xc097025a327c3c29: from storage DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb node DatanodeRegistration(127.0.0.1:35965, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=34667, infoSecurePort=0, ipcPort=33591, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:45,206 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6935fbbc8097903 with lease ID 0xc097025a327c3c29: from storage DS-c58ae600-4914-43f3-b90a-8ad6dec74458 node DatanodeRegistration(127.0.0.1:35965, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=34667, infoSecurePort=0, ipcPort=33591, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:15:45,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a52de59{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-41131-hadoop-hdfs-3_4_1-tests_jar-_-any-3843644601055382265/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:15:45,275 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37abffaf{HTTP/1.1, (http/1.1)}{localhost:41131} 2024-12-09T05:15:45,275 INFO [Time-limited test {}] server.Server(415): Started @186829ms 2024-12-09T05:15:45,277 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
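Two warnings recur every time the mini cluster brings a datanode back: the HTTP authentication filter cannot read /home/jenkins/hadoop-http-auth-signature-secret, and dfs.datanode.directoryscan.throttle.limit.ms.per.sec is set above 1000 ms/sec so the DirectoryScanner falls back to its default. Both are Configuration issues rather than test-logic failures. The sketch below shows one way a test setup could avoid them; the property names come straight from the log, while the values, the secret-file path, and the MiniDFSCluster scaffolding are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class QuietMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // The directory-scanner throttle is only honoured up to 1000 ms per second;
    // larger values are rejected, which is the WARN logged above.
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);

    // Give the HTTP authentication filter a readable secret file so it stops
    // falling back to random secrets on every restart (path is illustrative).
    conf.set("hadoop.http.authentication.signature.secret.file",
        "/tmp/hadoop-http-auth-signature-secret");

    // Two datanodes, matching the two-node pipelines in the block reports above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}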
2024-12-09T05:15:45,359 WARN [Thread-1124 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:15:45,362 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c4a68b6c06b189c with lease ID 0xc097025a327c3c2a: from storage DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047 node DatanodeRegistration(127.0.0.1:36995, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=40487, infoSecurePort=0, ipcPort=44819, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:45,364 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c4a68b6c06b189c with lease ID 0xc097025a327c3c2a: from storage DS-76b68192-e430-4550-8b69-3c049d580d59 node DatanodeRegistration(127.0.0.1:36995, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=40487, infoSecurePort=0, ipcPort=44819, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:15:46,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:15:46,294 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-09T05:15:46,296 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T05:15:46,297 WARN [RS:0;41a709354867:41341.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:46,297 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41341%2C1733721331603:(num 1733721331996) roll requested 2024-12-09T05:15:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
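This is the failure the test provokes on purpose: the datanodes are restarted, row1002 is validated, and the very next append fails with "All datanodes ... are bad", which the ring-buffer handler turns into a DamagedWALException and a request to roll the WAL. A rough sketch of how the restart can be driven from a MiniDFSCluster-based test follows; restartDataNode, getDataNodes, and waitActive are real MiniDFSCluster methods, but the surrounding structure is an assumption, not TestLogRolling's actual code.

import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DataNodeBounceSketch {
  // Bounce every datanode while a WAL block is open so the writer loses its
  // whole pipeline; the next append then fails with "All datanodes ... are bad"
  // and the region server is forced to roll the WAL, as seen above.
  static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
    int dataNodes = cluster.getDataNodes().size();
    for (int i = 0; i < dataNodes; i++) {
      cluster.restartDataNode(i); // stop and restart datanode i in place
    }
    // Wait for the restarted nodes to re-register and send block reports
    // (the "BLOCK* processReport" INFO lines above).
    cluster.waitActive();
  }
}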
2024-12-09T05:15:46,297 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.1733721346297
2024-12-09T05:15:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52816 deadline: 1733721356296, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL
2024-12-09T05:15:46,303 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 newFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297
2024-12-09T05:15:46,303 WARN [regionserver/41a709354867:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL
2024-12-09T05:15:46,304 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297
2024-12-09T05:15:46,304 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34667:34667),(127.0.0.1/127.0.0.1:40487:40487)]
2024-12-09T05:15:46,304 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 is not closed yet, will try archiving it next time
2024-12-09T05:15:46,304 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T05:15:46,304 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:15:46,304 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:15:46,305 WARN [IPC Server handler 2 on default port 44921 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1017 2024-12-09T05:15:46,305 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 after 1ms 2024-12-09T05:15:47,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:48,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:49,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:49,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1017: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T05:15:50,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:50,306 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 after 4001ms 2024-12-09T05:15:51,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:52,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:53,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:54,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:15:55,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:56,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:57,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:58,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:15:58,335 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T05:15:59,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:00,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:00,338 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018 java.io.IOException: Bad response ERROR for BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018 from datanode DatanodeInfoWithStorage[127.0.0.1:36995,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
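Two separate lease recoveries are interleaved in the log above. The one for the WAL under test succeeds on the second attempt ("Recovered lease, attempt=1 ... after 4001ms"), while the one against the long-gone cluster on port 45367 fails once per second with "Filesystem closed", because the reflective isFileClosed probe runs against a DFSClient that has already been shut down. The retry shape can be approximated with public HDFS client calls as below; this is a simplified sketch with made-up timeouts, not RecoverLeaseFSUtils' actual implementation.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Sketch of a recover-lease loop in the spirit of RecoverLeaseFSUtils:
  // ask the namenode to recover the lease, then poll isFileClosed() until the
  // file really is closed or a deadline passes. All timeouts are illustrative.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    long deadline = System.currentTimeMillis() + 900_000L; // assumed overall budget
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the file is closed and the lease freed.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      // Between attempts, poll isFileClosed(); this is the call that surfaces
      // "Filesystem closed" above when the underlying DFSClient is already gone.
      long nextAttempt = System.currentTimeMillis() + 4_000L; // assumed pause
      while (System.currentTimeMillis() < nextAttempt) {
        if (dfs.isFileClosed(wal)) {
          return true;
        }
        Thread.sleep(1_000L); // matches the once-per-second WARNs in the log
      }
    }
    return false;
  }
}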
2024-12-09T05:16:00,338 WARN [DataStreamer for file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 block BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35965,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK], DatanodeInfoWithStorage[127.0.0.1:36995,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36995,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]) is bad. 2024-12-09T05:16:00,338 WARN [PacketResponder: BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36995] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:16:00,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:36084 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35965:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36084 dst: /127.0.0.1:35965 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:16:00,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:55660 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36995:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55660 dst: /127.0.0.1:36995 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:16:00,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a52de59{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:00,342 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37abffaf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:16:00,342 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:16:00,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d7853cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:16:00,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d3186f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:16:00,344 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:16:00,344 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T05:16:00,344 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-40306056-172.17.0.2-1733721330858 (Datanode Uuid c69f82dd-8719-4a3a-8171-c3c34f68b7c2) service to localhost/127.0.0.1:44921 2024-12-09T05:16:00,344 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:16:00,345 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data1/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:00,345 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data2/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:00,345 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:16:00,358 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:16:00,361 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:16:00,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:16:00,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:16:00,362 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:16:00,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@273c90ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:16:00,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e035882{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:16:00,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3cac22ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-34471-hadoop-hdfs-3_4_1-tests_jar-_-any-3412841454659731867/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:00,479 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1591b4e7{HTTP/1.1, (http/1.1)}{localhost:34471} 2024-12-09T05:16:00,479 INFO [Time-limited test {}] server.Server(415): Started @202032ms 2024-12-09T05:16:00,480 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:16:00,497 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:00,498 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1556830641_22 at /127.0.0.1:58890 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35965:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58890 dst: /127.0.0.1:35965 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:16:00,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@268b2a87{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:00,500 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63bd399f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:16:00,500 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:16:00,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dcdba76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:16:00,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f6781d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:16:00,503 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:16:00,503 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
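The shutdown/startup churn in the entries around here — Jetty contexts stopping, the block-pool service ending for one Datanode Uuid, then a fresh ServerConnector and datanode webapp coming up — is what a DataNode restart looks like from inside a MiniDFSCluster-backed test (the test confirms this a little further down with "Data Nodes restarted"). A minimal sketch of that kind of restart, assuming an HBaseTestingUtility instance named testUtil; the wrapper class and method names are illustrative and not taken from this log:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DataNodeRestartSketch {
  // Restart every DataNode in the mini cluster. Each restart produces a
  // "Stopped ServerConnector ..." / "Started ServerConnector ..." pair like the ones above.
  static void restartAllDataNodes(HBaseTestingUtility testUtil) throws Exception {
    MiniDFSCluster dfs = testUtil.getDFSCluster();
    int count = dfs.getDataNodes().size();
    for (int i = 0; i < count; i++) {
      dfs.restartDataNode(i);  // stops DataNode i, then starts it again
    }
    dfs.waitActive();          // wait until the restarted DataNodes re-register with the NameNode
  }
}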
2024-12-09T05:16:00,503 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-40306056-172.17.0.2-1733721330858 (Datanode Uuid 671485d7-3754-4431-9e32-a43a25d06301) service to localhost/127.0.0.1:44921 2024-12-09T05:16:00,503 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:16:00,503 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data3/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:00,504 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data4/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:00,504 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:16:00,519 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:16:00,523 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:16:00,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:16:00,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:16:00,525 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:16:00,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68ed5a7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:16:00,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d456551{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:16:00,571 WARN [Thread-1168 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:16:00,574 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33aa1c5ef4888b9b with lease ID 0xc097025a327c3c2b: from storage DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047 node DatanodeRegistration(127.0.0.1:39035, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=45837, infoSecurePort=0, ipcPort=42019, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:00,574 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33aa1c5ef4888b9b with lease ID 0xc097025a327c3c2b: from storage DS-76b68192-e430-4550-8b69-3c049d580d59 node DatanodeRegistration(127.0.0.1:39035, datanodeUuid=c69f82dd-8719-4a3a-8171-c3c34f68b7c2, infoPort=45837, infoSecurePort=0, ipcPort=42019, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:00,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70d2b30{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/java.io.tmpdir/jetty-localhost-35285-hadoop-hdfs-3_4_1-tests_jar-_-any-4470094150157128024/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:00,664 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@153685b4{HTTP/1.1, (http/1.1)}{localhost:35285} 2024-12-09T05:16:00,664 INFO [Time-limited test {}] server.Server(415): Started @202218ms 2024-12-09T05:16:00,665 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:16:00,755 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:16:00,758 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb24f3c3b678e7b2 with lease ID 0xc097025a327c3c2c: from storage DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb node DatanodeRegistration(127.0.0.1:44899, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=34415, infoSecurePort=0, ipcPort=39123, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:00,758 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb24f3c3b678e7b2 with lease ID 0xc097025a327c3c2c: from storage DS-c58ae600-4914-43f3-b90a-8ad6dec74458 node DatanodeRegistration(127.0.0.1:44899, datanodeUuid=671485d7-3754-4431-9e32-a43a25d06301, infoPort=34415, infoSecurePort=0, ipcPort=39123, storageInfo=lv=-57;cid=testClusterID;nsid=56560426;c=1733721330858), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:01,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:01,523 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
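The DEBUG entry just above comes from a test-only fixer that reaches into Hadoop's FsDatasetAsyncDiskService via reflection; on Hadoop releases newer than 3.2.3/3.3.4 the private threadGroup field it looks for no longer exists, so the NoSuchFieldException is expected and merely logged (see HBASE-27595). A hedged sketch of that reflective-probe pattern, using hypothetical names rather than the actual HBaseTestingUtility code:

import java.lang.reflect.Field;

public class ReflectiveFieldProbe {
  // Read a private field by name; return null when the field has been removed in a
  // newer library release, so the caller can log a DEBUG line instead of failing.
  static Object readPrivateField(Object target, String fieldName) {
    try {
      Field f = target.getClass().getDeclaredField(fieldName);
      f.setAccessible(true);
      return f.get(target);
    } catch (NoSuchFieldException e) {
      return null;  // field gone in this Hadoop version: nothing to fix, as the entry above notes
    } catch (IllegalAccessException e) {
      throw new IllegalStateException(e);
    }
  }
}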
2024-12-09T05:16:01,684 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted 2024-12-09T05:16:01,685 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-09T05:16:01,686 WARN [RS:0;41a709354867:41341.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35965,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,687 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41341%2C1733721331603:(num 1733721346297) roll requested 2024-12-09T05:16:01,687 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.1733721361687 2024-12-09T05:16:01,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35965,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
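The sequence above — an append failing because every DataNode left in the pipeline is bad, surfaced as a DamagedWALException with "requesting roll of WAL", and the log roller immediately preparing a new WAL file — is the WAL's recovery path after the DataNode restarts. A simplified sketch of that control flow, assuming a hypothetical requestLogRoll() hook and a plain IOException in place of HBase's real FSHLog/DamagedWALException internals:

import java.io.IOException;

public class WalAppendSketch {
  interface WalWriter { void append(byte[] entry) throws IOException; }
  interface RollRequester { void requestLogRoll(); }

  // Append one entry; if the underlying HDFS pipeline is broken, ask the roller to
  // replace the writer with a fresh file and surface the failure to the caller,
  // mirroring the "Append sequenceId=..., requesting roll of WAL" entries above.
  static void appendOrRequestRoll(WalWriter writer, RollRequester roller, byte[] entry) throws IOException {
    try {
      writer.append(entry);
    } catch (IOException e) {
      roller.requestLogRoll();  // leads to "roll requested" and a new WAL file name in the log
      throw new IOException("Append failed, requesting roll of WAL", e);
    }
  }
}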
2024-12-09T05:16:01,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52816 deadline: 1733721371686, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-09T05:16:01,693 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 newFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 2024-12-09T05:16:01,693 WARN [regionserver/41a709354867:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-09T05:16:01,693 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 2024-12-09T05:16:01,694 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34415:34415),(127.0.0.1/127.0.0.1:45837:45837)] 2024-12-09T05:16:01,694 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 is not closed yet, will try archiving it next time 2024-12-09T05:16:01,694 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35965,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,694 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35965,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,694 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 2024-12-09T05:16:01,694 WARN [IPC Server handler 2 on default port 44921 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1020 2024-12-09T05:16:01,694 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 after 0ms 2024-12-09T05:16:01,775 WARN [master/41a709354867:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,776 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C36903%2C1733721331543:(num 1733721331703) roll requested 2024-12-09T05:16:01,776 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,776 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36903%2C1733721331543.1733721361776 2024-12-09T05:16:01,776 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:16:01,782 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL 2024-12-09T05:16:01,782 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 with entries=92, filesize=46.00 KB; new WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721361776 2024-12-09T05:16:01,782 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45837:45837),(127.0.0.1/127.0.0.1:34415:34415)] 2024-12-09T05:16:01,782 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 is not closed yet, will try archiving it next time 2024-12-09T05:16:01,782 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,782 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:01,782 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 2024-12-09T05:16:01,783 WARN [IPC Server handler 1 on default port 44921 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 has not been closed. 
Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741830_1016 2024-12-09T05:16:01,783 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 after 1ms 2024-12-09T05:16:01,846 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:16:01,847 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:16:02,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:03,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:04,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:04,574 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T05:16:05,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:05,695 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 after 4001ms 2024-12-09T05:16:05,784 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543/41a709354867%2C36903%2C1733721331543.1733721331703 after 4002ms 2024-12-09T05:16:06,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:07,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:07,758 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1016: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T05:16:08,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:08,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 688aa77f4712fd33e61f733d63bfbd0a, had cached 0 bytes from a total of 23930 2024-12-09T05:16:09,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:10,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:11,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:12,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:12,891 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T05:16:12,891 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T05:16:13,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:13,779 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:13,785 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 newFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:13,786 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:13,787 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45837:45837),(127.0.0.1/127.0.0.1:34415:34415)] 2024-12-09T05:16:13,787 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 is not closed yet, will try archiving it next time 2024-12-09T05:16:13,787 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:16:13,787 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:16:13,787 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 after 0ms 2024-12-09T05:16:13,787 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:16:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741840_1021 (size=1264) 2024-12-09T05:16:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741840_1021 (size=1264) 2024-12-09T05:16:13,794 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733721332785/Put/vlen=162/seqid=0] 2024-12-09T05:16:13,794 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1733721332843/Put/vlen=9/seqid=0] 2024-12-09T05:16:13,794 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1733721332865/Put/vlen=7/seqid=0] 2024-12-09T05:16:13,794 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: 
[\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733721333301/Put/vlen=218/seqid=0] 2024-12-09T05:16:13,795 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1733721342957/Put/vlen=1045/seqid=0] 2024-12-09T05:16:13,795 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721331996 2024-12-09T05:16:13,795 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 2024-12-09T05:16:13,795 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 2024-12-09T05:16:13,795 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 after 0ms 2024-12-09T05:16:13,795 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 2024-12-09T05:16:13,798 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1733721356332/Put/vlen=1045/seqid=0] 2024-12-09T05:16:13,799 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1733721358336/Put/vlen=1045/seqid=0] 2024-12-09T05:16:13,799 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721346297 2024-12-09T05:16:13,799 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 2024-12-09T05:16:13,799 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 2024-12-09T05:16:13,799 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 after 0ms 2024-12-09T05:16:13,799 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721361687 2024-12-09T05:16:13,802 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1733721371778/Put/vlen=1045/seqid=0] 2024-12-09T05:16:13,802 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for 
hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:13,802 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:13,803 WARN [IPC Server handler 1 on default port 44921 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025 2024-12-09T05:16:13,803 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 after 1ms 2024-12-09T05:16:14,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:14,762 WARN [ResponseProcessor for block BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:14,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-752169608_22 at /127.0.0.1:43002 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:44899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43002 dst: /127.0.0.1:44899 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:16:14,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-752169608_22 at /127.0.0.1:44110 [Receiving block BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:39035:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44110 dst: /127.0.0.1:39035 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39035 remote=/127.0.0.1:44110]. Total timeout mills is 60000, 59022 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:16:14,763 WARN [DataStreamer for file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 block BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39035,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK], DatanodeInfoWithStorage[127.0.0.1:44899,DS-42866ff9-e2a4-476d-908a-7cd3b9d83bfb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39035,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]) is bad. 
2024-12-09T05:16:14,766 WARN [DataStreamer for file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 block BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:16:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741842_1026 (size=85) 2024-12-09T05:16:15,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:16,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:17,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:17,804 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 after 4002ms 2024-12-09T05:16:17,804 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:17,808 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:17,809 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB 2024-12-09T05:16:17,809 WARN [RS_OPEN_META-regionserver/41a709354867:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,809 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41341%2C1733721331603.meta:.meta(num 1733721332384) roll requested 2024-12-09T05:16:17,809 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-09T05:16:17,809 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,809 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.meta.1733721377809.meta 2024-12-09T05:16:17,810 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 7602233e1695cb4fa966e7403d7d0887 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-09T05:16:17,810 WARN [RS:0;41a709354867:41341.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=11, requesting roll of WAL org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,810 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 7602233e1695cb4fa966e7403d7d0887: 2024-12-09T05:16:17,810 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,811 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 3981d7db47fabc187c8597d8f3f9ada3 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T05:16:17,811 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 3981d7db47fabc187c8597d8f3f9ada3: 2024-12-09T05:16:17,811 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:16:17,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T05:16:17,815 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T05:16:17,815 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d7738ef to 127.0.0.1:63582 2024-12-09T05:16:17,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:16:17,815 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:16:17,815 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1113857191, stopped=false 2024-12-09T05:16:17,815 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=41a709354867,36903,1733721331543 2024-12-09T05:16:17,815 WARN [regionserver/41a709354867:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-09T05:16:17,815 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721377809.meta 2024-12-09T05:16:17,816 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34415:34415),(127.0.0.1/127.0.0.1:45837:45837)] 2024-12-09T05:16:17,816 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta is not closed yet, will try archiving it next time 2024-12-09T05:16:17,816 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,816 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 41a709354867%2C41341%2C1733721331603:(num 1733721373779) roll requested 2024-12-09T05:16:17,816 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44021,DS-8f24ab09-82d8-4b64-82bd-9f6f2138c047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,816 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta 2024-12-09T05:16:17,816 INFO [regionserver/41a709354867:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C41341%2C1733721331603.1733721377816 2024-12-09T05:16:17,816 WARN [IPC Server handler 3 on default port 44921 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta has not been closed. Lease recovery is in progress. RecoveryId = 1028 for block blk_1073741834_1015 2024-12-09T05:16:17,817 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta after 1ms 2024-12-09T05:16:17,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:16:17,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:16:17,817 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T05:16:17,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:17,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:17,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:16:17,818 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,41341,1733721331603' ***** 2024-12-09T05:16:17,818 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:16:17,818 INFO [RS:0;41a709354867:41341 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:16:17,818 INFO [RS:0;41a709354867:41341 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-09T05:16:17,818 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:16:17,818 INFO [RS:0;41a709354867:41341 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:16:17,818 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(3579): Received CLOSE for 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:16:17,818 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:16:17,818 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(3579): Received CLOSE for 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:16:17,819 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,41341,1733721331603 2024-12-09T05:16:17,819 DEBUG [RS:0;41a709354867:41341 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:16:17,819 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:16:17,819 INFO [RS:0;41a709354867:41341 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:16:17,819 INFO [RS:0;41a709354867:41341 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:16:17,819 INFO [RS:0;41a709354867:41341 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:16:17,819 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:16:17,819 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-09T05:16:17,819 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 7602233e1695cb4fa966e7403d7d0887=TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887., 3981d7db47fabc187c8597d8f3f9ada3=hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3.} 2024-12-09T05:16:17,819 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 7602233e1695cb4fa966e7403d7d0887, disabling compactions & flushes 2024-12-09T05:16:17,819 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3981d7db47fabc187c8597d8f3f9ada3, 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:16:17,819 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:17,819 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:17,819 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 
after waiting 0 ms 2024-12-09T05:16:17,819 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:17,819 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:16:17,819 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:16:17,819 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:16:17,820 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-12-09T05:16:17,820 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:16:17,820 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 7602233e1695cb4fa966e7403d7d0887 1/1 column families, dataSize=4.20 KB heapSize=4.98 KB 2024-12-09T05:16:17,820 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-09T05:16:17,820 WARN [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-09T05:16:17,820 WARN [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-09T05:16:17,820 WARN [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-09T05:16:17,820 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 7602233e1695cb4fa966e7403d7d0887: 2024-12-09T05:16:17,820 ERROR [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2808): ***** ABORTING region server 41a709354867,41341,1733721331603: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] 
at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:16:17,820 ERROR [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-09T05:16:17,821 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-09T05:16:17,821 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-09T05:16:17,822 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-09T05:16:17,822 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-09T05:16:17,822 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 656281088 }, "NonHeapMemoryUsage": { "committed": 169541632, "init": 7667712, "max": -1, "used": 167569304 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-09T05:16:17,823 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36903 {}] master.MasterRpcServices(626): 41a709354867,41341,1733721331603 reported a fatal error: ***** ABORTING region server 41a709354867,41341,1733721331603: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 
***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: org.apache.hadoop.ipc.RemoteException(java.io.IOException): Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) at org.apache.hadoop.ipc.Client.call(Client.java:1529) at org.apache.hadoop.ipc.Client.call(Client.java:1426) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) at jdk.proxy2/jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) at 
org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) at jdk.proxy2/jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) at jdk.proxy2/jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) at jdk.proxy2/jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) at jdk.proxy2/jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) at jdk.proxy2/jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) at jdk.proxy2/jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) at jdk.proxy2/jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-09T05:16:17,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3981d7db47fabc187c8597d8f3f9ada3, disabling compactions & flushes 2024-12-09T05:16:17,825 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:17,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:17,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. after waiting 0 ms 2024-12-09T05:16:17,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:17,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3981d7db47fabc187c8597d8f3f9ada3: 2024-12-09T05:16:17,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:17,831 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 newFile=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721377816 2024-12-09T05:16:17,831 WARN [regionserver/41a709354867:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL 2024-12-09T05:16:17,831 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721377816 2024-12-09T05:16:17,831 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34415:34415),(127.0.0.1/127.0.0.1:45837:45837)] 2024-12-09T05:16:17,831 DEBUG [regionserver/41a709354867:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 is not closed yet, will try archiving it next time 2024-12-09T05:16:17,831 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T05:16:17,832 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-40306056-172.17.0.2-1733721330858:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T05:16:17,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:17,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 after 0ms 2024-12-09T05:16:17,833 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.1733721373779 to hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/oldWALs/41a709354867%2C41341%2C1733721331603.1733721373779 2024-12-09T05:16:17,838 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/.tmp/info/b81ed02822b340be88b96ad116b9a0aa is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887./info:regioninfo/1733721333305/Put/seqid=0 2024-12-09T05:16:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741845_1030 (size=8268) 2024-12-09T05:16:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741845_1030 (size=8268) 2024-12-09T05:16:17,845 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.66 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/.tmp/info/b81ed02822b340be88b96ad116b9a0aa 2024-12-09T05:16:17,864 INFO [regionserver/41a709354867:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:16:17,867 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/.tmp/table/e6480339a1f34025858c129de9cbe275 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733721333312/Put/seqid=0 2024-12-09T05:16:17,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741846_1031 (size=5482) 2024-12-09T05:16:17,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741846_1031 (size=5482) 2024-12-09T05:16:17,873 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=244 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/.tmp/table/e6480339a1f34025858c129de9cbe275 2024-12-09T05:16:17,880 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/.tmp/info/b81ed02822b340be88b96ad116b9a0aa as hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/info/b81ed02822b340be88b96ad116b9a0aa 2024-12-09T05:16:17,886 INFO [regionserver/41a709354867:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T05:16:17,886 INFO [regionserver/41a709354867:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T05:16:17,886 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/info/b81ed02822b340be88b96ad116b9a0aa, entries=20, sequenceid=16, filesize=8.1 K 2024-12-09T05:16:17,887 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/.tmp/table/e6480339a1f34025858c129de9cbe275 as hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/table/e6480339a1f34025858c129de9cbe275 2024-12-09T05:16:17,893 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/table/e6480339a1f34025858c129de9cbe275, entries=4, sequenceid=16, filesize=5.4 K 2024-12-09T05:16:17,894 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.90 KB/2972, heapSize ~5.14 KB/5264, currentSize=0 B/0 for 1588230740 in 74ms, sequenceid=16, compaction requested=false 2024-12-09T05:16:17,900 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-12-09T05:16:17,901 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T05:16:17,901 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:16:17,901 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:16:17,901 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T05:16:18,019 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(3579): Received CLOSE for 7602233e1695cb4fa966e7403d7d0887 2024-12-09T05:16:18,020 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(3579): Received CLOSE for 3981d7db47fabc187c8597d8f3f9ada3 2024-12-09T05:16:18,020 DEBUG [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1629): Waiting on 3981d7db47fabc187c8597d8f3f9ada3, 7602233e1695cb4fa966e7403d7d0887 
2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 7602233e1695cb4fa966e7403d7d0887, disabling compactions & flushes 2024-12-09T05:16:18,020 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. after waiting 0 ms 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 7602233e1695cb4fa966e7403d7d0887: 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733721332946.7602233e1695cb4fa966e7403d7d0887. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3981d7db47fabc187c8597d8f3f9ada3, disabling compactions & flushes 2024-12-09T05:16:18,020 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. after waiting 0 ms 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3981d7db47fabc187c8597d8f3f9ada3: 2024-12-09T05:16:18,020 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733721332431.3981d7db47fabc187c8597d8f3f9ada3. 
2024-12-09T05:16:18,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:18,220 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-09T05:16:18,220 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,41341,1733721331603; all regions closed. 2024-12-09T05:16:18,220 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603 2024-12-09T05:16:18,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741843_1027 (size=761) 2024-12-09T05:16:18,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741843_1027 (size=761) 2024-12-09T05:16:19,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:19,758 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-09T05:16:20,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:21,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:21,817 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta after 4001ms 2024-12-09T05:16:21,818 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta to hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/oldWALs/41a709354867%2C41341%2C1733721331603.meta.1733721332384.meta 2024-12-09T05:16:21,820 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/WALs/41a709354867,41341,1733721331603 2024-12-09T05:16:21,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741844_1029 (size=93) 2024-12-09T05:16:21,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741844_1029 (size=93) 2024-12-09T05:16:21,822 DEBUG [RS:0;41a709354867:41341 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:16:21,822 INFO [RS:0;41a709354867:41341 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:16:21,823 INFO [RS:0;41a709354867:41341 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T05:16:21,823 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-09T05:16:21,823 INFO [RS:0;41a709354867:41341 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41341 2024-12-09T05:16:21,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,41341,1733721331603 2024-12-09T05:16:21,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:16:21,827 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,41341,1733721331603] 2024-12-09T05:16:21,827 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,41341,1733721331603; numProcessing=1 2024-12-09T05:16:21,828 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,41341,1733721331603 already deleted, retry=false 2024-12-09T05:16:21,828 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,41341,1733721331603 expired; onlineServers=0 2024-12-09T05:16:21,828 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,36903,1733721331543' ***** 2024-12-09T05:16:21,828 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:16:21,829 DEBUG [M:0;41a709354867:36903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a04358d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:16:21,829 INFO [M:0;41a709354867:36903 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,36903,1733721331543 2024-12-09T05:16:21,829 INFO [M:0;41a709354867:36903 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,36903,1733721331543; all regions closed. 2024-12-09T05:16:21,829 DEBUG [M:0;41a709354867:36903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:16:21,829 DEBUG [M:0;41a709354867:36903 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:16:21,829 DEBUG [M:0;41a709354867:36903 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:16:21,829 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T05:16:21,829 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721331777 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721331777,5,FailOnTimeoutGroup] 2024-12-09T05:16:21,829 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721331777 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721331777,5,FailOnTimeoutGroup] 2024-12-09T05:16:21,829 INFO [M:0;41a709354867:36903 {}] hbase.ChoreService(370): Chore service for: master/41a709354867:0 had [] on shutdown 2024-12-09T05:16:21,829 DEBUG [M:0;41a709354867:36903 {}] master.HMaster(1733): Stopping service threads 2024-12-09T05:16:21,829 INFO [M:0;41a709354867:36903 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:16:21,830 INFO [M:0;41a709354867:36903 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:16:21,830 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T05:16:21,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:16:21,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:21,831 DEBUG [M:0;41a709354867:36903 {}] zookeeper.ZKUtil(347): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:16:21,831 WARN [M:0;41a709354867:36903 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:16:21,831 INFO [M:0;41a709354867:36903 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T05:16:21,831 INFO [M:0;41a709354867:36903 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:16:21,831 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:16:21,831 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:16:21,831 INFO [M:0;41a709354867:36903 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:16:21,831 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:16:21,831 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:16:21,831 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T05:16:21,831 INFO [M:0;41a709354867:36903 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.10 KB heapSize=49.26 KB 2024-12-09T05:16:21,847 DEBUG [M:0;41a709354867:36903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3522b427b8f540ba94f57cf9b144c463 is 82, key is hbase:meta,,1/info:regioninfo/1733721332409/Put/seqid=0 2024-12-09T05:16:21,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741847_1032 (size=5672) 2024-12-09T05:16:21,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741847_1032 (size=5672) 2024-12-09T05:16:21,853 INFO [M:0;41a709354867:36903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3522b427b8f540ba94f57cf9b144c463 2024-12-09T05:16:21,875 DEBUG [M:0;41a709354867:36903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ad3d3949f8b34a679d3f8bf1b8140453 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733721333317/Put/seqid=0 2024-12-09T05:16:21,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741848_1033 (size=7470) 2024-12-09T05:16:21,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741848_1033 (size=7470) 2024-12-09T05:16:21,882 INFO [M:0;41a709354867:36903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.50 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ad3d3949f8b34a679d3f8bf1b8140453 2024-12-09T05:16:21,901 DEBUG [M:0;41a709354867:36903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2e838192f41e4e43be52615507e54864 is 69, key is 41a709354867,41341,1733721331603/rs:state/1733721331849/Put/seqid=0 2024-12-09T05:16:21,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741849_1034 (size=5156) 2024-12-09T05:16:21,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741849_1034 (size=5156) 2024-12-09T05:16:21,907 INFO [M:0;41a709354867:36903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2e838192f41e4e43be52615507e54864 2024-12-09T05:16:21,927 DEBUG [M:0;41a709354867:36903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5367de771f8640858ba598ba4f43813c is 52, key is load_balancer_on/state:d/1733721332939/Put/seqid=0 2024-12-09T05:16:21,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:16:21,927 INFO [RS:0;41a709354867:41341 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,41341,1733721331603; zookeeper connection closed. 2024-12-09T05:16:21,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x1007533d02c0001, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:16:21,927 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18e3d8d2 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18e3d8d2 2024-12-09T05:16:21,928 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T05:16:21,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741850_1035 (size=5056) 2024-12-09T05:16:21,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741850_1035 (size=5056) 2024-12-09T05:16:21,933 INFO [M:0;41a709354867:36903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5367de771f8640858ba598ba4f43813c 2024-12-09T05:16:21,939 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3522b427b8f540ba94f57cf9b144c463 as hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3522b427b8f540ba94f57cf9b144c463 2024-12-09T05:16:21,943 INFO [M:0;41a709354867:36903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3522b427b8f540ba94f57cf9b144c463, entries=8, sequenceid=96, filesize=5.5 K 2024-12-09T05:16:21,944 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ad3d3949f8b34a679d3f8bf1b8140453 as hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ad3d3949f8b34a679d3f8bf1b8140453 2024-12-09T05:16:21,949 INFO [M:0;41a709354867:36903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ad3d3949f8b34a679d3f8bf1b8140453, entries=11, 
sequenceid=96, filesize=7.3 K 2024-12-09T05:16:21,950 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2e838192f41e4e43be52615507e54864 as hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2e838192f41e4e43be52615507e54864 2024-12-09T05:16:21,955 INFO [M:0;41a709354867:36903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2e838192f41e4e43be52615507e54864, entries=1, sequenceid=96, filesize=5.0 K 2024-12-09T05:16:21,956 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5367de771f8640858ba598ba4f43813c as hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5367de771f8640858ba598ba4f43813c 2024-12-09T05:16:21,960 INFO [M:0;41a709354867:36903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44921/user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5367de771f8640858ba598ba4f43813c, entries=1, sequenceid=96, filesize=4.9 K 2024-12-09T05:16:21,961 INFO [M:0;41a709354867:36903 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.10 KB/41064, heapSize ~49.20 KB/50376, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=96, compaction requested=false 2024-12-09T05:16:21,963 INFO [M:0;41a709354867:36903 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:16:21,963 DEBUG [M:0;41a709354867:36903 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:16:21,963 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/58349bcf-56aa-dc5f-ff2f-d9018f1aa8ca/MasterData/WALs/41a709354867,36903,1733721331543 2024-12-09T05:16:21,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44899 is added to blk_1073741841_1023 (size=757) 2024-12-09T05:16:21,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39035 is added to blk_1073741841_1023 (size=757) 2024-12-09T05:16:21,966 INFO [M:0;41a709354867:36903 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-09T05:16:21,966 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-09T05:16:21,966 INFO [M:0;41a709354867:36903 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36903 2024-12-09T05:16:21,968 DEBUG [M:0;41a709354867:36903 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/41a709354867,36903,1733721331543 already deleted, retry=false 2024-12-09T05:16:22,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:16:22,069 INFO [M:0;41a709354867:36903 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,36903,1733721331543; zookeeper connection closed. 2024-12-09T05:16:22,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36903-0x1007533d02c0000, quorum=127.0.0.1:63582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:16:22,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70d2b30{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:22,073 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@153685b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:16:22,073 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:16:22,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d456551{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:16:22,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68ed5a7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:16:22,075 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:16:22,075 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:16:22,075 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:16:22,075 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-40306056-172.17.0.2-1733721330858 (Datanode Uuid 671485d7-3754-4431-9e32-a43a25d06301) service to localhost/127.0.0.1:44921 2024-12-09T05:16:22,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data3/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:22,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data4/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:22,077 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:16:22,079 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3cac22ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:22,079 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1591b4e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:16:22,080 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:16:22,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e035882{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:16:22,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@273c90ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:16:22,082 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:16:22,082 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:16:22,082 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:16:22,082 WARN [BP-40306056-172.17.0.2-1733721330858 heartbeating to localhost/127.0.0.1:44921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-40306056-172.17.0.2-1733721330858 (Datanode Uuid c69f82dd-8719-4a3a-8171-c3c34f68b7c2) service to localhost/127.0.0.1:44921 2024-12-09T05:16:22,083 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data1/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:22,083 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/cluster_6b55bf11-bcfa-3f8e-60f7-cbab5af30df2/dfs/data/data2/current/BP-40306056-172.17.0.2-1733721330858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:16:22,083 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:16:22,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@442acd84{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:16:22,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e97bdb5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:16:22,093 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:16:22,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71387d7e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:16:22,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79df6632{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir/,STOPPED} 2024-12-09T05:16:22,099 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:16:22,116 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-09T05:16:22,124 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=100 (was 86) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:44921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:44921 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1179732672) connection to localhost/127.0.0.1:44921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=446 (was 428) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=166 (was 173), ProcessCount=11 (was 11), AvailableMemoryMB=7996 (was 8199) 2024-12-09T05:16:22,131 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=100, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=166, ProcessCount=11, AvailableMemoryMB=7996 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.log.dir so I do NOT create it in target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b9eacdd-e3c3-2946-73f4-e8030183c615/hadoop.tmp.dir so I do NOT create it in target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089, deleteOnExit=true 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/test.cache.data in system properties and HBase conf 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:16:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T05:16:22,133 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] 
hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:16:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:16:22,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:16:22,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:16:22,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:16:22,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:16:22,147 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:16:22,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:22,213 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:16:22,218 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:16:22,222 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:16:22,222 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:16:22,222 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:16:22,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:16:22,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67f78a6d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:16:22,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7531f2c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:16:22,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49374f0d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/java.io.tmpdir/jetty-localhost-39243-hadoop-hdfs-3_4_1-tests_jar-_-any-15053602356284258077/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:16:22,350 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e499d4b{HTTP/1.1, (http/1.1)}{localhost:39243} 2024-12-09T05:16:22,350 INFO [Time-limited test {}] server.Server(415): Started @223904ms 2024-12-09T05:16:22,363 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
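
For context, the StartMiniClusterOption recorded at 05:16:22,132 above (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false) maps onto the ordinary hbase-server test-side usage sketched below. This is an illustration only, assuming the standard HBaseTestingUtility/StartMiniClusterOption API from this branch; the actual body of TestLogRolling is not reproduced in this log.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors the option printed in the log: 1 master, 1 region server,
        // 2 HDFS datanodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);   // starts ZK, mini-DFS and HBase, as logged above
        try {
          // the test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
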
2024-12-09T05:16:22,439 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:16:22,443 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:16:22,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:16:22,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:16:22,444 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:16:22,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29eebd65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:16:22,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@210540b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:16:22,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51145f43{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/java.io.tmpdir/jetty-localhost-38435-hadoop-hdfs-3_4_1-tests_jar-_-any-9273952113467203489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:22,560 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1efd4b91{HTTP/1.1, (http/1.1)}{localhost:38435} 2024-12-09T05:16:22,561 INFO [Time-limited test {}] server.Server(415): Started @224114ms 2024-12-09T05:16:22,562 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:16:22,601 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:16:22,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:16:22,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:16:22,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:16:22,610 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:16:22,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eb85507{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:16:22,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bb1ebfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:16:22,649 WARN [Thread-1376 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data2/current/BP-1845418823-172.17.0.2-1733721382165/current, will proceed with Du for space computation calculation, 2024-12-09T05:16:22,649 WARN [Thread-1375 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data1/current/BP-1845418823-172.17.0.2-1733721382165/current, will proceed with Du for space computation calculation, 2024-12-09T05:16:22,676 WARN [Thread-1354 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:16:22,679 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d4ce4edfd1c6e1b with lease ID 0xfdb56c0221ee9666: Processing first storage report for DS-0ac58533-9181-4685-a503-efa207dcee4a from datanode DatanodeRegistration(127.0.0.1:38741, datanodeUuid=2b4c85bc-a0e6-43e0-bb58-c21762514d0a, infoPort=45715, infoSecurePort=0, ipcPort=43125, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165) 2024-12-09T05:16:22,680 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d4ce4edfd1c6e1b with lease ID 0xfdb56c0221ee9666: from storage DS-0ac58533-9181-4685-a503-efa207dcee4a node DatanodeRegistration(127.0.0.1:38741, datanodeUuid=2b4c85bc-a0e6-43e0-bb58-c21762514d0a, infoPort=45715, infoSecurePort=0, ipcPort=43125, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:22,680 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d4ce4edfd1c6e1b with lease ID 0xfdb56c0221ee9666: Processing first storage report for DS-d863ddac-f153-4612-9d09-87a2a3ff3cf1 from datanode DatanodeRegistration(127.0.0.1:38741, datanodeUuid=2b4c85bc-a0e6-43e0-bb58-c21762514d0a, infoPort=45715, infoSecurePort=0, ipcPort=43125, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165) 2024-12-09T05:16:22,680 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d4ce4edfd1c6e1b with lease ID 0xfdb56c0221ee9666: from storage DS-d863ddac-f153-4612-9d09-87a2a3ff3cf1 node DatanodeRegistration(127.0.0.1:38741, datanodeUuid=2b4c85bc-a0e6-43e0-bb58-c21762514d0a, infoPort=45715, infoSecurePort=0, ipcPort=43125, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:16:22,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@542bd7b1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/java.io.tmpdir/jetty-localhost-34729-hadoop-hdfs-3_4_1-tests_jar-_-any-4522899072202364459/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:16:22,744 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f020452{HTTP/1.1, (http/1.1)}{localhost:34729} 2024-12-09T05:16:22,744 INFO [Time-limited test {}] server.Server(415): Started @224298ms 2024-12-09T05:16:22,746 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T05:16:22,834 WARN [Thread-1401 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data3/current/BP-1845418823-172.17.0.2-1733721382165/current, will proceed with Du for space computation calculation, 2024-12-09T05:16:22,834 WARN [Thread-1402 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data4/current/BP-1845418823-172.17.0.2-1733721382165/current, will proceed with Du for space computation calculation, 2024-12-09T05:16:22,856 WARN [Thread-1390 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:16:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2963ad69004ecc10 with lease ID 0xfdb56c0221ee9667: Processing first storage report for DS-64db8ab0-e508-421d-8940-42868ec1481c from datanode DatanodeRegistration(127.0.0.1:46471, datanodeUuid=a6b5d21d-44cf-49c3-9653-7ae7e2fa3ece, infoPort=43121, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165) 2024-12-09T05:16:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2963ad69004ecc10 with lease ID 0xfdb56c0221ee9667: from storage DS-64db8ab0-e508-421d-8940-42868ec1481c node DatanodeRegistration(127.0.0.1:46471, datanodeUuid=a6b5d21d-44cf-49c3-9653-7ae7e2fa3ece, infoPort=43121, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2963ad69004ecc10 with lease ID 0xfdb56c0221ee9667: Processing first storage report for DS-ce419035-7df7-47ef-bd71-a0324dec26e4 from datanode DatanodeRegistration(127.0.0.1:46471, datanodeUuid=a6b5d21d-44cf-49c3-9653-7ae7e2fa3ece, infoPort=43121, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165) 2024-12-09T05:16:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2963ad69004ecc10 with lease ID 0xfdb56c0221ee9667: from storage DS-ce419035-7df7-47ef-bd71-a0324dec26e4 node DatanodeRegistration(127.0.0.1:46471, datanodeUuid=a6b5d21d-44cf-49c3-9653-7ae7e2fa3ece, infoPort=43121, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=529633586;c=1733721382165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:16:22,870 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff 2024-12-09T05:16:22,873 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/zookeeper_0, clientPort=64550, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:16:22,874 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64550 2024-12-09T05:16:22,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:22,875 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:22,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:16:22,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:16:22,885 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215 with version=8 2024-12-09T05:16:22,885 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase-staging 2024-12-09T05:16:22,887 INFO [Time-limited test {}] client.ConnectionUtils(129): master/41a709354867:0 server-side Connection retries=45 2024-12-09T05:16:22,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:16:22,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:16:22,887 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:16:22,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:16:22,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:16:22,887 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:16:22,887 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:16:22,888 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34009 2024-12-09T05:16:22,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:22,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:22,892 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34009 connecting to ZooKeeper ensemble=127.0.0.1:64550 2024-12-09T05:16:22,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340090x0, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:16:22,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34009-0x100753498bf0000 connected 2024-12-09T05:16:22,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:16:22,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:16:22,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:16:22,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:22,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34009 2024-12-09T05:16:22,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34009 2024-12-09T05:16:22,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34009 2024-12-09T05:16:22,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34009 2024-12-09T05:16:22,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34009 2024-12-09T05:16:22,940 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215, hbase.cluster.distributed=false 2024-12-09T05:16:22,957 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:16:22,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:16:22,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:16:22,957 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:16:22,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 
2024-12-09T05:16:22,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:16:22,958 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:16:22,958 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:16:22,960 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36791 2024-12-09T05:16:22,961 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:16:22,961 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:16:22,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:22,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:22,966 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36791 connecting to ZooKeeper ensemble=127.0.0.1:64550 2024-12-09T05:16:22,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367910x0, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:16:22,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:367910x0, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:16:22,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36791-0x100753498bf0001 connected 2024-12-09T05:16:22,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:16:22,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:16:22,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36791 2024-12-09T05:16:22,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36791 2024-12-09T05:16:22,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36791 2024-12-09T05:16:22,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36791 2024-12-09T05:16:22,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36791 2024-12-09T05:16:22,977 INFO [master/41a709354867:0:becomeActiveMaster {}] 
master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/41a709354867,34009,1733721382886 2024-12-09T05:16:22,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:16:22,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:16:22,979 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/41a709354867,34009,1733721382886 2024-12-09T05:16:22,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:16:22,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:16:22,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:22,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:22,982 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:16:22,982 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/41a709354867,34009,1733721382886 from backup master directory 2024-12-09T05:16:22,982 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:16:22,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/41a709354867,34009,1733721382886 2024-12-09T05:16:22,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:16:22,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:16:22,984 WARN [master/41a709354867:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts 
(Longer MTTR!) 2024-12-09T05:16:22,984 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=41a709354867,34009,1733721382886 2024-12-09T05:16:22,990 DEBUG [M:0;41a709354867:34009 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;41a709354867:34009 2024-12-09T05:16:22,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:16:22,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:16:22,997 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/hbase.id with ID: 0411922e-f37e-473d-8474-cb3dd9360e94 2024-12-09T05:16:23,008 INFO [master/41a709354867:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:23,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:16:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:16:23,019 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 
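
In client-API terms, the master:store table descriptor printed above corresponds roughly to the builder calls sketched below. This only illustrates what the printed attributes mean; the real local region is created internally by the master (not through the client API), and the class name MasterStoreDescriptorSketch is invented for the example.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor masterStoreLike() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' use the plainer settings shown in the log:
            // 1 version, ROW bloom filter, no encoding, default 64 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setBloomFilterType(BloomType.ROW).build())
            .build();
      }
    }
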
2024-12-09T05:16:23,019 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:16:23,020 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:16:23,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:16:23,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:16:23,027 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store 2024-12-09T05:16:23,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:16:23,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:16:23,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:23,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:16:23,034 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T05:16:23,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:16:23,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:16:23,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:16:23,034 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:16:23,034 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:16:23,035 WARN [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/.initializing 2024-12-09T05:16:23,035 DEBUG [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/WALs/41a709354867,34009,1733721382886 2024-12-09T05:16:23,038 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C34009%2C1733721382886, suffix=, logDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/WALs/41a709354867,34009,1733721382886, archiveDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/oldWALs, maxLogs=10 2024-12-09T05:16:23,038 INFO [master/41a709354867:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C34009%2C1733721382886.1733721383038 2024-12-09T05:16:23,043 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/WALs/41a709354867,34009,1733721382886/41a709354867%2C34009%2C1733721382886.1733721383038 2024-12-09T05:16:23,043 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43121:43121),(127.0.0.1/127.0.0.1:45715:45715)] 2024-12-09T05:16:23,043 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:16:23,043 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:23,043 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,043 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,044 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:16:23,046 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,047 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:16:23,048 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:16:23,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:16:23,050 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:16:23,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:16:23,051 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,052 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:16:23,053 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,053 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,055 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:16:23,056 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:16:23,058 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:16:23,058 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729096, jitterRate=-0.07290767133235931}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:16:23,059 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:16:23,063 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:16:23,066 DEBUG [master/41a709354867:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63ca5bca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:16:23,067 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-09T05:16:23,067 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:16:23,067 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:16:23,067 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T05:16:23,067 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T05:16:23,068 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-09T05:16:23,068 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:16:23,069 INFO [master/41a709354867:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
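
The FSHLog record at 05:16:23,038 above reports blocksize=256 MB, rollsize=128 MB and maxLogs=10; the roll size is the WAL block size scaled by the log-roll multiplier (256 MB x 0.5 = 128 MB). As a rough sketch only, assuming the usual configuration keys (the log itself does not name them), the same sizing could be pinned in a test configuration like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys; values mirror the WAL configuration record in the log.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB WAL blocks
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at ~128 MB
        conf.setInt("hbase.regionserver.maxlogs", 10);                         // keep at most 10 WALs
      }
    }
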
2024-12-09T05:16:23,070 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:16:23,071 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:16:23,071 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:16:23,072 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:16:23,073 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:16:23,074 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:16:23,077 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:16:23,079 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:16:23,080 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:16:23,081 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:16:23,082 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:16:23,084 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:16:23,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:16:23,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:16:23,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T05:16:23,086 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=41a709354867,34009,1733721382886, sessionid=0x100753498bf0000, setting cluster-up flag (Was=false) 2024-12-09T05:16:23,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,093 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:16:23,094 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,34009,1733721382886 2024-12-09T05:16:23,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,103 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:16:23,104 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,34009,1733721382886 2024-12-09T05:16:23,107 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T05:16:23,107 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T05:16:23,107 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
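The ZKUtil entries a few lines up probe optional znodes such as /hbase/balancer and /hbase/switch/split and treat a missing node as an unset flag rather than an error ("not necessarily an error"). A minimal sketch of that kind of probe using the stock ZooKeeper client rather than HBase's ZKUtil wrapper; the quorum address is taken from the log, the session timeout is an arbitrary choice:

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbe {
  // Probes a switch/flag znode the way the startup log does: absence simply means "not set".
  static byte[] readIfPresent(ZooKeeper zk, String path) throws KeeperException, InterruptedException {
    Stat stat = zk.exists(path, false);   // null when the node does not exist
    if (stat == null) {
      return null;                        // missing node: not necessarily an error
    }
    return zk.getData(path, false, stat);
  }

  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64550", 30_000, event -> { });
    System.out.println(readIfPresent(zk, "/hbase/balancer") == null
        ? "balancer znode absent" : "balancer znode present");
    zk.close();
  }
}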
2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 41a709354867,34009,1733721382886 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/41a709354867:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:16:23,108 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,110 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733721413110 2024-12-09T05:16:23,110 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:16:23,110 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:16:23,110 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:16:23,111 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,111 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:16:23,111 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:16:23,112 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:16:23,112 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:16:23,112 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,112 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721383112,5,FailOnTimeoutGroup] 2024-12-09T05:16:23,112 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:16:23,112 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721383112,5,FailOnTimeoutGroup] 2024-12-09T05:16:23,112 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
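The FSTableDescriptors entry above writes the built-in hbase:meta descriptor with its info, rep_barrier and table families and the MultiRowMutationEndpoint coprocessor. The sketch below shows how a descriptor with similar attributes could be assembled through the public client API; it is illustrative only (hypothetical table name, a single family) and is not how the meta descriptor is produced internally:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // 'info' family with the attributes shown in the log: 3 versions, ROW_INDEX_V1
    // encoding, ROWCOL bloom filter, in-memory, 8 KiB blocks.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("meta_like_demo"))   // hypothetical table name
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        .build();
    System.out.println(td);
  }
}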
2024-12-09T05:16:23,112 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:16:23,112 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,112 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:16:23,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:16:23,120 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T05:16:23,120 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215 2024-12-09T05:16:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:16:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:16:23,130 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:23,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:16:23,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:16:23,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:16:23,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:16:23,135 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:16:23,136 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality 
to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:16:23,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,138 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740 2024-12-09T05:16:23,138 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740 2024-12-09T05:16:23,139 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:16:23,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:16:23,143 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:16:23,143 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717919, jitterRate=-0.08711954951286316}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:16:23,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:16:23,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:16:23,143 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:16:23,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:16:23,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:16:23,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:16:23,144 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:16:23,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:16:23,145 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:16:23,145 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T05:16:23,145 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-09T05:16:23,146 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T05:16:23,147 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-09T05:16:23,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-12-09T05:16:23,189 DEBUG [RS:0;41a709354867:36791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;41a709354867:36791 2024-12-09T05:16:23,190 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1008): ClusterId : 0411922e-f37e-473d-8474-cb3dd9360e94 2024-12-09T05:16:23,190 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:16:23,192 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:16:23,192 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:16:23,194 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:16:23,194 DEBUG [RS:0;41a709354867:36791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@407baba0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:16:23,194 DEBUG [RS:0;41a709354867:36791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@115e9955, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:16:23,194 INFO [RS:0;41a709354867:36791 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:16:23,194 INFO [RS:0;41a709354867:36791 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T05:16:23,194 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1090): About to register with Master. 
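The WARN and stack trace above come from RecoverLeaseFSUtils invoking the filesystem's isFileClosed probe through reflection; here the reflective call itself fails because the underlying DFSClient has already been closed ("Filesystem closed"). The sketch below only illustrates that reflective-probe pattern with a swallow-and-fall-back policy; the setup and path are hypothetical and this is not the HBase implementation:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  /**
   * Asks the filesystem whether a file is closed, via reflection, so the caller
   * still compiles against FileSystem implementations that lack isFileClosed().
   * Returns false when the method is missing or the probe itself fails.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false;                  // this FileSystem has no such probe
    } catch (InvocationTargetException e) {
      // The probe ran but threw, e.g. "java.io.IOException: Filesystem closed"
      // as in the trace above.
      return false;
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());   // hypothetical local setup
    System.out.println(isFileClosed(fs, new Path("/tmp/example.meta")));
  }
}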
2024-12-09T05:16:23,195 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,34009,1733721382886 with isa=41a709354867/172.17.0.2:36791, startcode=1733721382956 2024-12-09T05:16:23,195 DEBUG [RS:0;41a709354867:36791 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:16:23,197 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53977, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:16:23,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34009 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,36791,1733721382956 2024-12-09T05:16:23,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34009 {}] master.ServerManager(486): Registering regionserver=41a709354867,36791,1733721382956 2024-12-09T05:16:23,199 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215 2024-12-09T05:16:23,199 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37923 2024-12-09T05:16:23,199 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:16:23,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:16:23,201 DEBUG [RS:0;41a709354867:36791 {}] zookeeper.ZKUtil(111): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,36791,1733721382956 2024-12-09T05:16:23,201 WARN [RS:0;41a709354867:36791 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T05:16:23,201 INFO [RS:0;41a709354867:36791 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:16:23,202 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956 2024-12-09T05:16:23,203 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,36791,1733721382956] 2024-12-09T05:16:23,205 DEBUG [RS:0;41a709354867:36791 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:16:23,205 INFO [RS:0;41a709354867:36791 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:16:23,207 INFO [RS:0;41a709354867:36791 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:16:23,207 INFO [RS:0;41a709354867:36791 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:16:23,207 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,208 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:16:23,209 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
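In the MemStoreFlusher entry above the low-water mark is 95% of the global limit (836 M = 0.95 x 880 M), which matches the documented default for hbase.regionserver.global.memstore.size.lower.limit. A small arithmetic sketch of that relationship, assuming the standard configuration key and default rather than anything read from this test:

import org.apache.hadoop.conf.Configuration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Fraction of the global memstore limit at which flushing backs off;
    // 0.95 is the documented default and matches 836 M / 880 M in the log.
    float lowerLimitFraction =
        conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

    long globalLimitBytes = 880L * 1024 * 1024;           // value reported by MemStoreFlusher
    long lowMarkBytes = (long) (globalLimitBytes * lowerLimitFraction);

    System.out.println("limit=" + globalLimitBytes + " lowMark=" + lowMarkBytes); // ~836 M
  }
}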
2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:16:23,209 DEBUG [RS:0;41a709354867:36791 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:16:23,212 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,212 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,212 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,212 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,212 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36791,1733721382956-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
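Each RS_* executor listed above is a named, bounded thread pool with its own core/max size (and, as the RemoteProcedureDispatcher entry earlier notes for its own pool, idle core threads are allowed to time out). A minimal java.util.concurrent sketch of one such pool; the name and sizes are copied from the RS_OPEN_REGION entry, the keep-alive value is an arbitrary choice:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ExecutorPoolSketch {
  /** Builds a named, bounded pool like the RS_OPEN_REGION executor (core=1, max=1). */
  static ThreadPoolExecutor newPool(String name, int corePoolSize, int maxPoolSize) {
    AtomicInteger seq = new AtomicInteger();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        corePoolSize, maxPoolSize,
        60L, TimeUnit.SECONDS,                 // idle keep-alive; arbitrary for this sketch
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + seq.incrementAndGet()));
    pool.allowCoreThreadTimeOut(true);         // mirrors allowCoreThreadTimeOut=true in the log
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION-regionserver", 1, 1);
    openRegion.execute(() -> System.out.println("would open a region here"));
    openRegion.shutdown();
  }
}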
2024-12-09T05:16:23,227 INFO [RS:0;41a709354867:36791 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:16:23,227 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,36791,1733721382956-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,242 INFO [RS:0;41a709354867:36791 {}] regionserver.Replication(204): 41a709354867,36791,1733721382956 started 2024-12-09T05:16:23,242 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,36791,1733721382956, RpcServer on 41a709354867/172.17.0.2:36791, sessionid=0x100753498bf0001 2024-12-09T05:16:23,242 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:16:23,242 DEBUG [RS:0;41a709354867:36791 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,36791,1733721382956 2024-12-09T05:16:23,242 DEBUG [RS:0;41a709354867:36791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,36791,1733721382956' 2024-12-09T05:16:23,242 DEBUG [RS:0;41a709354867:36791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:16:23,242 DEBUG [RS:0;41a709354867:36791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,36791,1733721382956 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,36791,1733721382956' 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:16:23,243 DEBUG [RS:0;41a709354867:36791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:16:23,243 INFO [RS:0;41a709354867:36791 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:16:23,243 INFO [RS:0;41a709354867:36791 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:16:23,297 WARN [41a709354867:34009 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-09T05:16:23,345 INFO [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C36791%2C1733721382956, suffix=, logDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956, archiveDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/oldWALs, maxLogs=32 2024-12-09T05:16:23,346 INFO [RS:0;41a709354867:36791 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36791%2C1733721382956.1733721383346 2024-12-09T05:16:23,357 INFO [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721383346 2024-12-09T05:16:23,357 DEBUG [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45715:45715),(127.0.0.1/127.0.0.1:43121:43121)] 2024-12-09T05:16:23,428 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:16:23,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:23,548 DEBUG [41a709354867:34009 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T05:16:23,548 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=41a709354867,36791,1733721382956 2024-12-09T05:16:23,549 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,36791,1733721382956, state=OPENING 2024-12-09T05:16:23,552 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:16:23,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:23,554 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=41a709354867,36791,1733721382956}] 2024-12-09T05:16:23,554 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:16:23,554 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:16:23,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:16:23,707 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:16:23,709 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54534, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:16:23,712 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T05:16:23,713 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:16:23,714 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C36791%2C1733721382956.meta, suffix=.meta, logDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956, 
archiveDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/oldWALs, maxLogs=32 2024-12-09T05:16:23,715 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36791%2C1733721382956.meta.1733721383715.meta 2024-12-09T05:16:23,720 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.meta.1733721383715.meta 2024-12-09T05:16:23,720 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45715:45715),(127.0.0.1/127.0.0.1:43121:43121)] 2024-12-09T05:16:23,720 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:16:23,721 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:16:23,721 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:16:23,721 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T05:16:23,721 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:16:23,721 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:23,721 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T05:16:23,721 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T05:16:23,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:16:23,723 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:16:23,723 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:16:23,724 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:16:23,724 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:16:23,725 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:16:23,725 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:16:23,726 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740 2024-12-09T05:16:23,727 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740 2024-12-09T05:16:23,729 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T05:16:23,730 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:16:23,731 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759515, jitterRate=-0.03422689437866211}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:16:23,731 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:16:23,732 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733721383706 2024-12-09T05:16:23,734 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:16:23,734 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T05:16:23,734 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,36791,1733721382956 2024-12-09T05:16:23,735 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,36791,1733721382956, state=OPEN 2024-12-09T05:16:23,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:16:23,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:16:23,740 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:16:23,740 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:16:23,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:16:23,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=41a709354867,36791,1733721382956 in 186 msec 2024-12-09T05:16:23,743 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:16:23,743 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 597 msec 2024-12-09T05:16:23,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 639 msec 2024-12-09T05:16:23,746 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733721383745, completionTime=-1 2024-12-09T05:16:23,746 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T05:16:23,746 DEBUG [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T05:16:23,746 DEBUG [hconnection-0x28864195-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:16:23,748 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:16:23,749 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T05:16:23,749 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733721443749 2024-12-09T05:16:23,749 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733721503749 2024-12-09T05:16:23,749 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34009,1733721382886-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34009,1733721382886-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34009,1733721382886-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-41a709354867:34009, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
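The master notices the namespace table is missing and creates it itself via a CreateTableProcedure (pid=4 below). For comparison, the sketch that follows shows the client-side equivalent of a "not found, so create it" check using the Admin API, with a hypothetical table and family; it is not the path the master takes for hbase:namespace:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateIfAbsentSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("demo_settings");   // illustrative table name
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(name)) {   // mirrors the "table not found" check in the log
        admin.createTable(desc);
      }
    }
  }
}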
2024-12-09T05:16:23,754 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:16:23,755 DEBUG [master/41a709354867:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T05:16:23,755 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T05:16:23,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:16:23,757 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:23,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:16:23,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:16:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:16:23,768 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a6b6a225afae51ef4045810ef3ac3226, NAME => 'hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215 2024-12-09T05:16:23,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:16:23,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:16:23,776 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:23,776 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing a6b6a225afae51ef4045810ef3ac3226, disabling compactions & flushes 2024-12-09T05:16:23,776 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:16:23,776 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:16:23,776 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. after waiting 0 ms 2024-12-09T05:16:23,776 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:16:23,776 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:16:23,776 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for a6b6a225afae51ef4045810ef3ac3226: 2024-12-09T05:16:23,777 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:16:23,778 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733721383777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721383777"}]},"ts":"1733721383777"} 2024-12-09T05:16:23,780 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T05:16:23,781 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:16:23,781 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721383781"}]},"ts":"1733721383781"} 2024-12-09T05:16:23,783 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T05:16:23,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=a6b6a225afae51ef4045810ef3ac3226, ASSIGN}] 2024-12-09T05:16:23,788 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=a6b6a225afae51ef4045810ef3ac3226, ASSIGN 2024-12-09T05:16:23,789 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=a6b6a225afae51ef4045810ef3ac3226, ASSIGN; state=OFFLINE, location=41a709354867,36791,1733721382956; forceNewPlan=false, retain=false 2024-12-09T05:16:23,939 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=a6b6a225afae51ef4045810ef3ac3226, regionState=OPENING, regionLocation=41a709354867,36791,1733721382956 2024-12-09T05:16:23,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure a6b6a225afae51ef4045810ef3ac3226, server=41a709354867,36791,1733721382956}] 2024-12-09T05:16:24,094 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:16:24,098 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:16:24,098 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => a6b6a225afae51ef4045810ef3ac3226, NAME => 'hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:16:24,098 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,098 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:24,099 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,099 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,100 INFO [StoreOpener-a6b6a225afae51ef4045810ef3ac3226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,101 INFO [StoreOpener-a6b6a225afae51ef4045810ef3ac3226-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b6a225afae51ef4045810ef3ac3226 columnFamilyName info 2024-12-09T05:16:24,101 DEBUG [StoreOpener-a6b6a225afae51ef4045810ef3ac3226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:24,102 INFO [StoreOpener-a6b6a225afae51ef4045810ef3ac3226-1 {}] regionserver.HStore(327): Store=a6b6a225afae51ef4045810ef3ac3226/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:16:24,102 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,104 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:16:24,106 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:16:24,107 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened a6b6a225afae51ef4045810ef3ac3226; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833509, jitterRate=0.05986271798610687}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:16:24,107 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for a6b6a225afae51ef4045810ef3ac3226: 2024-12-09T05:16:24,107 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226., pid=6, masterSystemTime=1733721384094 2024-12-09T05:16:24,109 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:16:24,109 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 
2024-12-09T05:16:24,110 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=a6b6a225afae51ef4045810ef3ac3226, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,36791,1733721382956 2024-12-09T05:16:24,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:16:24,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure a6b6a225afae51ef4045810ef3ac3226, server=41a709354867,36791,1733721382956 in 170 msec 2024-12-09T05:16:24,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:16:24,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=a6b6a225afae51ef4045810ef3ac3226, ASSIGN in 327 msec 2024-12-09T05:16:24,117 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:16:24,118 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721384117"}]},"ts":"1733721384117"} 2024-12-09T05:16:24,119 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T05:16:24,122 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:16:24,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 368 msec 2024-12-09T05:16:24,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:24,157 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T05:16:24,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:16:24,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:24,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:16:24,163 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T05:16:24,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:16:24,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 11 msec 2024-12-09T05:16:24,185 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T05:16:24,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:16:24,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 10 msec 2024-12-09T05:16:24,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T05:16:24,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T05:16:24,213 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.229sec 
2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34009,1733721382886-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:16:24,214 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34009,1733721382886-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:16:24,216 DEBUG [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:16:24,216 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:16:24,216 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34009,1733721382886-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:16:24,279 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f4030a9 to 127.0.0.1:64550 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11b3d8e2 2024-12-09T05:16:24,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@545cb5b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:16:24,285 DEBUG [hconnection-0x17ba7ede-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:16:24,286 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:16:24,288 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=41a709354867,34009,1733721382886 2024-12-09T05:16:24,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:16:24,292 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-09T05:16:24,293 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:16:24,294 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40646, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:16:24,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T05:16:24,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T05:16:24,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:16:24,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:24,298 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:16:24,298 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:24,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9 2024-12-09T05:16:24,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:16:24,299 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:16:24,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741837_1013 (size=405) 2024-12-09T05:16:24,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741837_1013 (size=405) 2024-12-09T05:16:24,308 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ec01a52831cd7b5dd666f4b7a7bb4975, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215 2024-12-09T05:16:24,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741838_1014 (size=88) 2024-12-09T05:16:24,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added 
to blk_1073741838_1014 (size=88) 2024-12-09T05:16:24,315 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:24,315 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing ec01a52831cd7b5dd666f4b7a7bb4975, disabling compactions & flushes 2024-12-09T05:16:24,315 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:24,315 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:24,315 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. after waiting 0 ms 2024-12-09T05:16:24,315 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:24,315 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:24,315 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:16:24,316 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:16:24,317 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733721384316"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721384316"}]},"ts":"1733721384316"} 2024-12-09T05:16:24,318 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-09T05:16:24,319 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:16:24,319 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721384319"}]},"ts":"1733721384319"} 2024-12-09T05:16:24,321 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-09T05:16:24,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec01a52831cd7b5dd666f4b7a7bb4975, ASSIGN}] 2024-12-09T05:16:24,326 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec01a52831cd7b5dd666f4b7a7bb4975, ASSIGN 2024-12-09T05:16:24,327 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec01a52831cd7b5dd666f4b7a7bb4975, ASSIGN; state=OFFLINE, location=41a709354867,36791,1733721382956; forceNewPlan=false, retain=false 2024-12-09T05:16:24,477 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ec01a52831cd7b5dd666f4b7a7bb4975, regionState=OPENING, regionLocation=41a709354867,36791,1733721382956 2024-12-09T05:16:24,479 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure ec01a52831cd7b5dd666f4b7a7bb4975, server=41a709354867,36791,1733721382956}] 2024-12-09T05:16:24,631 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:16:24,635 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:16:24,636 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => ec01a52831cd7b5dd666f4b7a7bb4975, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:16:24,636 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,636 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:16:24,636 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,636 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,637 INFO [StoreOpener-ec01a52831cd7b5dd666f4b7a7bb4975-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,639 INFO [StoreOpener-ec01a52831cd7b5dd666f4b7a7bb4975-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec01a52831cd7b5dd666f4b7a7bb4975 columnFamilyName info 2024-12-09T05:16:24,639 DEBUG [StoreOpener-ec01a52831cd7b5dd666f4b7a7bb4975-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:16:24,639 INFO [StoreOpener-ec01a52831cd7b5dd666f4b7a7bb4975-1 {}] regionserver.HStore(327): Store=ec01a52831cd7b5dd666f4b7a7bb4975/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:16:24,640 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,640 DEBUG 
[RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,642 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:16:24,644 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:16:24,645 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened ec01a52831cd7b5dd666f4b7a7bb4975; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817944, jitterRate=0.04006986320018768}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:16:24,645 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:16:24,646 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975., pid=11, masterSystemTime=1733721384631 2024-12-09T05:16:24,648 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:24,648 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:16:24,649 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ec01a52831cd7b5dd666f4b7a7bb4975, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,36791,1733721382956 2024-12-09T05:16:24,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T05:16:24,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure ec01a52831cd7b5dd666f4b7a7bb4975, server=41a709354867,36791,1733721382956 in 171 msec 2024-12-09T05:16:24,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T05:16:24,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec01a52831cd7b5dd666f4b7a7bb4975, ASSIGN in 327 msec 2024-12-09T05:16:24,655 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:16:24,655 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721384655"}]},"ts":"1733721384655"} 2024-12-09T05:16:24,657 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-09T05:16:24,660 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:16:24,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 364 msec 2024-12-09T05:16:25,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:25,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:25,188 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T05:16:26,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:27,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:28,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:29,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:29,223 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:16:29,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,243 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:16:29,257 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T05:16:29,257 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-09T05:16:30,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:31,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:32,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:33,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:33,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta after 68047ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor238.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:16:34,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:34,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:16:34,301 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed 2024-12-09T05:16:34,304 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:34,304 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:16:34,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace 2024-12-09T05:16:34,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace 2024-12-09T05:16:34,318 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:16:34,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T05:16:34,319 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:16:34,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:16:34,481 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:16:34,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36791 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-09T05:16:34,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 
2024-12-09T05:16:34,483 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing a6b6a225afae51ef4045810ef3ac3226 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T05:16:34,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/.tmp/info/6ef770f6e05d49519d6f9e370c70aed7 is 45, key is default/info:d/1733721384168/Put/seqid=0 2024-12-09T05:16:34,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741839_1015 (size=5037) 2024-12-09T05:16:34,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741839_1015 (size=5037) 2024-12-09T05:16:34,505 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/.tmp/info/6ef770f6e05d49519d6f9e370c70aed7 2024-12-09T05:16:34,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/.tmp/info/6ef770f6e05d49519d6f9e370c70aed7 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/info/6ef770f6e05d49519d6f9e370c70aed7 2024-12-09T05:16:34,517 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/info/6ef770f6e05d49519d6f9e370c70aed7, entries=2, sequenceid=6, filesize=4.9 K 2024-12-09T05:16:34,518 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for a6b6a225afae51ef4045810ef3ac3226 in 35ms, sequenceid=6, compaction requested=false 2024-12-09T05:16:34,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for a6b6a225afae51ef4045810ef3ac3226: 2024-12-09T05:16:34,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 
2024-12-09T05:16:34,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-09T05:16:34,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-09T05:16:34,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-09T05:16:34,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec 2024-12-09T05:16:34,527 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 213 msec 2024-12-09T05:16:35,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:36,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:37,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:38,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:39,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:40,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:41,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:42,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:43,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:44,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:44,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T05:16:44,320 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed 2024-12-09T05:16:44,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:44,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T05:16:44,329 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:16:44,330 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:16:44,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:16:44,483 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:16:44,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36791 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T05:16:44,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:44,484 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing ec01a52831cd7b5dd666f4b7a7bb4975 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T05:16:44,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/13e55cfa00824aa9a2f159c8d7e36560 is 1080, key is row0001/info:/1733721404323/Put/seqid=0 2024-12-09T05:16:44,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741840_1016 (size=6033) 2024-12-09T05:16:44,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741840_1016 (size=6033) 2024-12-09T05:16:44,508 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/13e55cfa00824aa9a2f159c8d7e36560 2024-12-09T05:16:44,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/13e55cfa00824aa9a2f159c8d7e36560 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/13e55cfa00824aa9a2f159c8d7e36560 2024-12-09T05:16:44,520 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/13e55cfa00824aa9a2f159c8d7e36560, entries=1, sequenceid=5, filesize=5.9 K 2024-12-09T05:16:44,521 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
ec01a52831cd7b5dd666f4b7a7bb4975 in 36ms, sequenceid=5, compaction requested=false 2024-12-09T05:16:44,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:16:44,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:16:44,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-09T05:16:44,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-09T05:16:44,524 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-09T05:16:44,524 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 192 msec 2024-12-09T05:16:44,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec 2024-12-09T05:16:45,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:46,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:47,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:48,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:49,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:50,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:51,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:52,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:52,870 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T05:16:53,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:53,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 688aa77f4712fd33e61f733d63bfbd0a, had cached 0 bytes from a total of 23930 2024-12-09T05:16:54,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:16:54,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T05:16:54,331 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed 2024-12-09T05:16:54,335 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:54,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:16:54,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T05:16:54,337 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:16:54,338 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:16:54,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:16:54,490 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:16:54,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36791 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T05:16:54,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:16:54,491 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing ec01a52831cd7b5dd666f4b7a7bb4975 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T05:16:54,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/ff26349f11644eaba8af7f25a8461ac9 is 1080, key is row0002/info:/1733721414331/Put/seqid=0 2024-12-09T05:16:54,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741841_1017 (size=6033) 2024-12-09T05:16:54,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741841_1017 (size=6033) 2024-12-09T05:16:54,502 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/ff26349f11644eaba8af7f25a8461ac9 2024-12-09T05:16:54,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/ff26349f11644eaba8af7f25a8461ac9 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/ff26349f11644eaba8af7f25a8461ac9 2024-12-09T05:16:54,514 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/ff26349f11644eaba8af7f25a8461ac9, entries=1, sequenceid=9, filesize=5.9 K 2024-12-09T05:16:54,514 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec01a52831cd7b5dd666f4b7a7bb4975 in 23ms, sequenceid=9, compaction requested=false 2024-12-09T05:16:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:16:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:16:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-09T05:16:54,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-09T05:16:54,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-09T05:16:54,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-12-09T05:16:54,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-09T05:16:55,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:56,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:56,194 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:16:56,196 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:16:57,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:58,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:16:59,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:00,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:01,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:02,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:03,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:04,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:04,231 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T05:17:04,231 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-09T05:17:04,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T05:17:04,339 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed 2024-12-09T05:17:04,341 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36791%2C1733721382956.1733721424341 2024-12-09T05:17:04,348 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721383346 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721424341 2024-12-09T05:17:04,348 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45715:45715),(127.0.0.1/127.0.0.1:43121:43121)] 2024-12-09T05:17:04,349 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721383346 is not closed yet, will try archiving it next time 2024-12-09T05:17:04,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741833_1009 (size=6574) 2024-12-09T05:17:04,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741833_1009 (size=6574) 2024-12-09T05:17:04,352 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:17:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:17:04,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T05:17:04,354 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:17:04,355 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:17:04,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:17:04,506 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:17:04,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36791 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-09T05:17:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:04,507 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing ec01a52831cd7b5dd666f4b7a7bb4975 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T05:17:04,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/174351fdf4474845b081aa0db9bfd444 is 1080, key is row0003/info:/1733721424339/Put/seqid=0 2024-12-09T05:17:04,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741843_1019 (size=6033) 2024-12-09T05:17:04,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741843_1019 (size=6033) 2024-12-09T05:17:04,518 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/174351fdf4474845b081aa0db9bfd444 2024-12-09T05:17:04,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/174351fdf4474845b081aa0db9bfd444 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/174351fdf4474845b081aa0db9bfd444 2024-12-09T05:17:04,529 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/174351fdf4474845b081aa0db9bfd444, entries=1, sequenceid=13, filesize=5.9 K 2024-12-09T05:17:04,530 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec01a52831cd7b5dd666f4b7a7bb4975 in 23ms, sequenceid=13, compaction requested=true 2024-12-09T05:17:04,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:17:04,530 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:04,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-09T05:17:04,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-09T05:17:04,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-09T05:17:04,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-12-09T05:17:04,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-09T05:17:05,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:06,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:07,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:08,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:09,099 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region a6b6a225afae51ef4045810ef3ac3226, had cached 0 bytes from a total of 5037 2024-12-09T05:17:09,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:09,636 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region ec01a52831cd7b5dd666f4b7a7bb4975, had cached 0 bytes from a total of 18099 2024-12-09T05:17:10,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:11,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:12,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:13,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:14,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:14,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T05:17:14,355 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed 2024-12-09T05:17:14,355 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:17:14,357 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:17:14,357 DEBUG [Time-limited test {}] regionserver.HStore(1540): ec01a52831cd7b5dd666f4b7a7bb4975/info is initiating minor compaction (all files) 2024-12-09T05:17:14,357 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:17:14,357 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:14,357 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of ec01a52831cd7b5dd666f4b7a7bb4975/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:14,357 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/13e55cfa00824aa9a2f159c8d7e36560, hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/ff26349f11644eaba8af7f25a8461ac9, hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/174351fdf4474845b081aa0db9bfd444] into tmpdir=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp, totalSize=17.7 K 2024-12-09T05:17:14,358 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 13e55cfa00824aa9a2f159c8d7e36560, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733721404323 2024-12-09T05:17:14,358 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting ff26349f11644eaba8af7f25a8461ac9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733721414331 2024-12-09T05:17:14,358 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 174351fdf4474845b081aa0db9bfd444, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733721424339 2024-12-09T05:17:14,370 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): ec01a52831cd7b5dd666f4b7a7bb4975#info#compaction#30 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:17:14,371 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/e4635b0084174e298aef4c144f425d81 is 1080, key is row0001/info:/1733721404323/Put/seqid=0 2024-12-09T05:17:14,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741844_1020 (size=8296) 2024-12-09T05:17:14,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741844_1020 (size=8296) 2024-12-09T05:17:14,390 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/e4635b0084174e298aef4c144f425d81 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/e4635b0084174e298aef4c144f425d81 2024-12-09T05:17:14,397 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec01a52831cd7b5dd666f4b7a7bb4975/info of ec01a52831cd7b5dd666f4b7a7bb4975 into e4635b0084174e298aef4c144f425d81(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T05:17:14,397 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:17:14,399 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36791%2C1733721382956.1733721434399 2024-12-09T05:17:14,406 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721424341 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721434399 2024-12-09T05:17:14,406 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45715:45715),(127.0.0.1/127.0.0.1:43121:43121)] 2024-12-09T05:17:14,406 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721424341 is not closed yet, will try archiving it next time 2024-12-09T05:17:14,407 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721383346 to hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/oldWALs/41a709354867%2C36791%2C1733721382956.1733721383346 2024-12-09T05:17:14,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741842_1018 (size=2520) 2024-12-09T05:17:14,408 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741842_1018 (size=2520) 2024-12-09T05:17:14,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:17:14,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:17:14,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T05:17:14,412 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:17:14,413 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:17:14,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:17:14,565 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,36791,1733721382956 2024-12-09T05:17:14,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36791 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-09T05:17:14,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:17:14,566 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing ec01a52831cd7b5dd666f4b7a7bb4975 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T05:17:14,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/7d472791fab84c9bb573d0dd0fcafd57 is 1080, key is row0000/info:/1733721434398/Put/seqid=0 2024-12-09T05:17:14,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741846_1022 (size=6033) 2024-12-09T05:17:14,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741846_1022 (size=6033) 2024-12-09T05:17:14,576 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/7d472791fab84c9bb573d0dd0fcafd57 2024-12-09T05:17:14,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/7d472791fab84c9bb573d0dd0fcafd57 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/7d472791fab84c9bb573d0dd0fcafd57 2024-12-09T05:17:14,588 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/7d472791fab84c9bb573d0dd0fcafd57, entries=1, sequenceid=18, filesize=5.9 K 2024-12-09T05:17:14,589 INFO [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec01a52831cd7b5dd666f4b7a7bb4975 in 24ms, sequenceid=18, compaction requested=false 2024-12-09T05:17:14,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:17:14,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:17:14,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/41a709354867:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-09T05:17:14,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-09T05:17:14,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-09T05:17:14,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-09T05:17:14,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-12-09T05:17:15,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:16,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:17,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:18,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:19,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:20,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:21,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:22,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:22,870 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T05:17:23,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:24,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34009 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T05:17:24,413 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed 2024-12-09T05:17:24,415 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C36791%2C1733721382956.1733721444415 2024-12-09T05:17:24,422 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721434399 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721444415 2024-12-09T05:17:24,422 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45715:45715),(127.0.0.1/127.0.0.1:43121:43121)] 2024-12-09T05:17:24,422 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721434399 is not closed yet, will try archiving it next time 2024-12-09T05:17:24,422 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956/41a709354867%2C36791%2C1733721382956.1733721424341 to hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/oldWALs/41a709354867%2C36791%2C1733721382956.1733721424341 2024-12-09T05:17:24,423 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T05:17:24,423 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T05:17:24,423 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f4030a9 to 127.0.0.1:64550 2024-12-09T05:17:24,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:17:24,423 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:17:24,423 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1518126563, stopped=false 2024-12-09T05:17:24,423 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=41a709354867,34009,1733721382886 2024-12-09T05:17:24,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741845_1021 (size=2026) 2024-12-09T05:17:24,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741845_1021 (size=2026) 2024-12-09T05:17:24,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:17:24,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/running 2024-12-09T05:17:24,425 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T05:17:24,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:24,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:24,425 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:17:24,425 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,36791,1733721382956' ***** 2024-12-09T05:17:24,425 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:17:24,426 INFO [RS:0;41a709354867:36791 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:17:24,426 INFO [RS:0;41a709354867:36791 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:17:24,426 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:17:24,426 INFO [RS:0;41a709354867:36791 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:17:24,426 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(3579): Received CLOSE for ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(3579): Received CLOSE for a6b6a225afae51ef4045810ef3ac3226 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,36791,1733721382956 2024-12-09T05:17:24,427 DEBUG [RS:0;41a709354867:36791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:17:24,427 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ec01a52831cd7b5dd666f4b7a7bb4975, disabling compactions & flushes 2024-12-09T05:17:24,427 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:17:24,427 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:17:24,427 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. after waiting 0 ms 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T05:17:24,427 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:17:24,427 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ec01a52831cd7b5dd666f4b7a7bb4975 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T05:17:24,427 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-09T05:17:24,427 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, ec01a52831cd7b5dd666f4b7a7bb4975=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975., a6b6a225afae51ef4045810ef3ac3226=hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226.} 2024-12-09T05:17:24,427 DEBUG [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, a6b6a225afae51ef4045810ef3ac3226, ec01a52831cd7b5dd666f4b7a7bb4975 2024-12-09T05:17:24,427 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:17:24,428 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:17:24,428 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:17:24,428 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:17:24,428 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:17:24,428 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB 2024-12-09T05:17:24,428 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:17:24,428 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:17:24,432 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/b4c84fcb438e42f68d564e0d9d852fd9 is 1080, key is row0001/info:/1733721444414/Put/seqid=0 2024-12-09T05:17:24,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741848_1024 (size=6033) 2024-12-09T05:17:24,444 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741848_1024 (size=6033) 2024-12-09T05:17:24,444 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/b4c84fcb438e42f68d564e0d9d852fd9 2024-12-09T05:17:24,450 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/.tmp/info/b4c84fcb438e42f68d564e0d9d852fd9 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/b4c84fcb438e42f68d564e0d9d852fd9 2024-12-09T05:17:24,451 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/.tmp/info/2469605022c44b03b20b4ec2de404342 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975./info:regioninfo/1733721384648/Put/seqid=0 2024-12-09T05:17:24,456 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/b4c84fcb438e42f68d564e0d9d852fd9, entries=1, sequenceid=22, filesize=5.9 K 2024-12-09T05:17:24,457 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec01a52831cd7b5dd666f4b7a7bb4975 in 30ms, sequenceid=22, compaction requested=true 2024-12-09T05:17:24,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741849_1025 (size=8430) 2024-12-09T05:17:24,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741849_1025 (size=8430) 2024-12-09T05:17:24,462 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/.tmp/info/2469605022c44b03b20b4ec2de404342 2024-12-09T05:17:24,468 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/13e55cfa00824aa9a2f159c8d7e36560, 
hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/ff26349f11644eaba8af7f25a8461ac9, hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/174351fdf4474845b081aa0db9bfd444] to archive 2024-12-09T05:17:24,469 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T05:17:24,470 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/13e55cfa00824aa9a2f159c8d7e36560 to hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/13e55cfa00824aa9a2f159c8d7e36560 2024-12-09T05:17:24,472 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/ff26349f11644eaba8af7f25a8461ac9 to hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/ff26349f11644eaba8af7f25a8461ac9 2024-12-09T05:17:24,473 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/174351fdf4474845b081aa0db9bfd444 to hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/info/174351fdf4474845b081aa0db9bfd444 2024-12-09T05:17:24,478 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec01a52831cd7b5dd666f4b7a7bb4975/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-09T05:17:24,478 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 
2024-12-09T05:17:24,478 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ec01a52831cd7b5dd666f4b7a7bb4975: 2024-12-09T05:17:24,478 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733721384295.ec01a52831cd7b5dd666f4b7a7bb4975. 2024-12-09T05:17:24,479 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing a6b6a225afae51ef4045810ef3ac3226, disabling compactions & flushes 2024-12-09T05:17:24,479 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:17:24,479 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:17:24,479 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. after waiting 0 ms 2024-12-09T05:17:24,479 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:17:24,483 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/namespace/a6b6a225afae51ef4045810ef3ac3226/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T05:17:24,483 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 2024-12-09T05:17:24,483 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for a6b6a225afae51ef4045810ef3ac3226: 2024-12-09T05:17:24,483 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733721383754.a6b6a225afae51ef4045810ef3ac3226. 
2024-12-09T05:17:24,488 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/.tmp/table/3b6795e3e8c84c2c9c39ad4722a4dbd0 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733721384655/Put/seqid=0 2024-12-09T05:17:24,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741850_1026 (size=5532) 2024-12-09T05:17:24,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741850_1026 (size=5532) 2024-12-09T05:17:24,493 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/.tmp/table/3b6795e3e8c84c2c9c39ad4722a4dbd0 2024-12-09T05:17:24,499 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/.tmp/info/2469605022c44b03b20b4ec2de404342 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/info/2469605022c44b03b20b4ec2de404342 2024-12-09T05:17:24,504 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/info/2469605022c44b03b20b4ec2de404342, entries=20, sequenceid=14, filesize=8.2 K 2024-12-09T05:17:24,505 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/.tmp/table/3b6795e3e8c84c2c9c39ad4722a4dbd0 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/table/3b6795e3e8c84c2c9c39ad4722a4dbd0 2024-12-09T05:17:24,510 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/table/3b6795e3e8c84c2c9c39ad4722a4dbd0, entries=4, sequenceid=14, filesize=5.4 K 2024-12-09T05:17:24,511 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 83ms, sequenceid=14, compaction requested=false 2024-12-09T05:17:24,515 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-09T05:17:24,515 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 
2024-12-09T05:17:24,516 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:17:24,516 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:17:24,516 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T05:17:24,628 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,36791,1733721382956; all regions closed. 2024-12-09T05:17:24,628 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956 2024-12-09T05:17:24,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741834_1010 (size=4570) 2024-12-09T05:17:24,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741834_1010 (size=4570) 2024-12-09T05:17:24,632 DEBUG [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/oldWALs 2024-12-09T05:17:24,633 INFO [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C36791%2C1733721382956.meta:.meta(num 1733721383715) 2024-12-09T05:17:24,633 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/WALs/41a709354867,36791,1733721382956 2024-12-09T05:17:24,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741847_1023 (size=1545) 2024-12-09T05:17:24,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741847_1023 (size=1545) 2024-12-09T05:17:24,638 DEBUG [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(1071): Moved 2 WAL file(s) to /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/oldWALs 2024-12-09T05:17:24,638 INFO [RS:0;41a709354867:36791 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C36791%2C1733721382956:(num 1733721444415) 2024-12-09T05:17:24,638 DEBUG [RS:0;41a709354867:36791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:17:24,638 INFO [RS:0;41a709354867:36791 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:17:24,638 INFO [RS:0;41a709354867:36791 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T05:17:24,638 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-09T05:17:24,639 INFO [RS:0;41a709354867:36791 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36791 2024-12-09T05:17:24,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,36791,1733721382956 2024-12-09T05:17:24,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:17:24,642 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,36791,1733721382956] 2024-12-09T05:17:24,642 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,36791,1733721382956; numProcessing=1 2024-12-09T05:17:24,643 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,36791,1733721382956 already deleted, retry=false 2024-12-09T05:17:24,643 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,36791,1733721382956 expired; onlineServers=0 2024-12-09T05:17:24,643 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,34009,1733721382886' ***** 2024-12-09T05:17:24,643 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:17:24,644 DEBUG [M:0;41a709354867:34009 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bc61d52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:17:24,644 INFO [M:0;41a709354867:34009 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,34009,1733721382886 2024-12-09T05:17:24,644 INFO [M:0;41a709354867:34009 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,34009,1733721382886; all regions closed. 2024-12-09T05:17:24,644 DEBUG [M:0;41a709354867:34009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:17:24,644 DEBUG [M:0;41a709354867:34009 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:17:24,644 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T05:17:24,644 DEBUG [M:0;41a709354867:34009 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:17:24,644 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721383112 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721383112,5,FailOnTimeoutGroup] 2024-12-09T05:17:24,644 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721383112 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721383112,5,FailOnTimeoutGroup] 2024-12-09T05:17:24,644 INFO [M:0;41a709354867:34009 {}] hbase.ChoreService(370): Chore service for: master/41a709354867:0 had [] on shutdown 2024-12-09T05:17:24,644 DEBUG [M:0;41a709354867:34009 {}] master.HMaster(1733): Stopping service threads 2024-12-09T05:17:24,644 INFO [M:0;41a709354867:34009 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:17:24,645 INFO [M:0;41a709354867:34009 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:17:24,645 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T05:17:24,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:17:24,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:24,645 DEBUG [M:0;41a709354867:34009 {}] zookeeper.ZKUtil(347): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:17:24,645 WARN [M:0;41a709354867:34009 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:17:24,645 INFO [M:0;41a709354867:34009 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T05:17:24,645 INFO [M:0;41a709354867:34009 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:17:24,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:17:24,646 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:17:24,646 INFO [M:0;41a709354867:34009 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:24,646 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:24,646 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T05:17:24,646 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:24,646 INFO [M:0;41a709354867:34009 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.09 KB heapSize=81.71 KB 2024-12-09T05:17:24,661 DEBUG [M:0;41a709354867:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f3a97ef66a9c4183a65d20ecfdecb8ba is 82, key is hbase:meta,,1/info:regioninfo/1733721383734/Put/seqid=0 2024-12-09T05:17:24,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741851_1027 (size=5672) 2024-12-09T05:17:24,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741851_1027 (size=5672) 2024-12-09T05:17:24,667 INFO [M:0;41a709354867:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f3a97ef66a9c4183a65d20ecfdecb8ba 2024-12-09T05:17:24,694 DEBUG [M:0;41a709354867:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/53e9664231494553bb85f4d846afd003 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733721384661/Put/seqid=0 2024-12-09T05:17:24,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741852_1028 (size=8357) 2024-12-09T05:17:24,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741852_1028 (size=8357) 2024-12-09T05:17:24,700 INFO [M:0;41a709354867:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.48 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/53e9664231494553bb85f4d846afd003 2024-12-09T05:17:24,705 INFO [M:0;41a709354867:34009 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 53e9664231494553bb85f4d846afd003 2024-12-09T05:17:24,727 DEBUG [M:0;41a709354867:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6971a9745637422c8dc991d9d2df35e9 is 69, key is 41a709354867,36791,1733721382956/rs:state/1733721383198/Put/seqid=0 2024-12-09T05:17:24,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741853_1029 (size=5156) 2024-12-09T05:17:24,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741853_1029 (size=5156) 2024-12-09T05:17:24,733 INFO [M:0;41a709354867:34009 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6971a9745637422c8dc991d9d2df35e9 2024-12-09T05:17:24,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:17:24,742 INFO [RS:0;41a709354867:36791 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,36791,1733721382956; zookeeper connection closed. 2024-12-09T05:17:24,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36791-0x100753498bf0001, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:17:24,743 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e79b7e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e79b7e 2024-12-09T05:17:24,743 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T05:17:24,753 DEBUG [M:0;41a709354867:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92778233d8d240f8b6f4d6797cfcba9a is 52, key is load_balancer_on/state:d/1733721384290/Put/seqid=0 2024-12-09T05:17:24,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741854_1030 (size=5056) 2024-12-09T05:17:24,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741854_1030 (size=5056) 2024-12-09T05:17:24,759 INFO [M:0;41a709354867:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92778233d8d240f8b6f4d6797cfcba9a 2024-12-09T05:17:24,765 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f3a97ef66a9c4183a65d20ecfdecb8ba as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f3a97ef66a9c4183a65d20ecfdecb8ba 2024-12-09T05:17:24,770 INFO [M:0;41a709354867:34009 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f3a97ef66a9c4183a65d20ecfdecb8ba, entries=8, sequenceid=184, filesize=5.5 K 2024-12-09T05:17:24,771 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/53e9664231494553bb85f4d846afd003 as 
hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/53e9664231494553bb85f4d846afd003 2024-12-09T05:17:24,776 INFO [M:0;41a709354867:34009 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 53e9664231494553bb85f4d846afd003 2024-12-09T05:17:24,776 INFO [M:0;41a709354867:34009 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/53e9664231494553bb85f4d846afd003, entries=21, sequenceid=184, filesize=8.2 K 2024-12-09T05:17:24,777 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6971a9745637422c8dc991d9d2df35e9 as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6971a9745637422c8dc991d9d2df35e9 2024-12-09T05:17:24,782 INFO [M:0;41a709354867:34009 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6971a9745637422c8dc991d9d2df35e9, entries=1, sequenceid=184, filesize=5.0 K 2024-12-09T05:17:24,783 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92778233d8d240f8b6f4d6797cfcba9a as hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/92778233d8d240f8b6f4d6797cfcba9a 2024-12-09T05:17:24,788 INFO [M:0;41a709354867:34009 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37923/user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/92778233d8d240f8b6f4d6797cfcba9a, entries=1, sequenceid=184, filesize=4.9 K 2024-12-09T05:17:24,789 INFO [M:0;41a709354867:34009 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.09 KB/66649, heapSize ~81.65 KB/83608, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=184, compaction requested=false 2024-12-09T05:17:24,791 INFO [M:0;41a709354867:34009 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T05:17:24,791 DEBUG [M:0;41a709354867:34009 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:17:24,792 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/047e19f5-77ff-0eb3-5f93-1aa4e9657215/MasterData/WALs/41a709354867,34009,1733721382886 2024-12-09T05:17:24,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38741 is added to blk_1073741830_1006 (size=79170) 2024-12-09T05:17:24,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46471 is added to blk_1073741830_1006 (size=79170) 2024-12-09T05:17:24,798 INFO [M:0;41a709354867:34009 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-09T05:17:24,799 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:17:24,799 INFO [M:0;41a709354867:34009 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34009 2024-12-09T05:17:24,802 DEBUG [M:0;41a709354867:34009 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/41a709354867,34009,1733721382886 already deleted, retry=false 2024-12-09T05:17:24,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:17:24,905 INFO [M:0;41a709354867:34009 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,34009,1733721382886; zookeeper connection closed. 2024-12-09T05:17:24,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x100753498bf0000, quorum=127.0.0.1:64550, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:17:24,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@542bd7b1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:17:24,908 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f020452{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:17:24,908 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:17:24,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bb1ebfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:17:24,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eb85507{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir/,STOPPED} 2024-12-09T05:17:24,910 WARN [BP-1845418823-172.17.0.2-1733721382165 heartbeating to localhost/127.0.0.1:37923 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:17:24,910 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:17:24,910 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:17:24,910 WARN [BP-1845418823-172.17.0.2-1733721382165 heartbeating to localhost/127.0.0.1:37923 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845418823-172.17.0.2-1733721382165 (Datanode Uuid a6b5d21d-44cf-49c3-9653-7ae7e2fa3ece) service to localhost/127.0.0.1:37923 2024-12-09T05:17:24,911 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data3/current/BP-1845418823-172.17.0.2-1733721382165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:17:24,911 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data4/current/BP-1845418823-172.17.0.2-1733721382165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:17:24,911 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:17:24,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51145f43{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:17:24,914 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1efd4b91{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:17:24,914 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:17:24,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@210540b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:17:24,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29eebd65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir/,STOPPED} 2024-12-09T05:17:24,916 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:17:24,916 WARN [BP-1845418823-172.17.0.2-1733721382165 heartbeating to localhost/127.0.0.1:37923 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:17:24,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:17:24,916 WARN [BP-1845418823-172.17.0.2-1733721382165 heartbeating to localhost/127.0.0.1:37923 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1845418823-172.17.0.2-1733721382165 (Datanode Uuid 2b4c85bc-a0e6-43e0-bb58-c21762514d0a) service to localhost/127.0.0.1:37923 2024-12-09T05:17:24,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data1/current/BP-1845418823-172.17.0.2-1733721382165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:17:24,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/cluster_86c7bcca-b1c1-5608-0327-531452e5e089/dfs/data/data2/current/BP-1845418823-172.17.0.2-1733721382165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:17:24,917 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:17:24,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49374f0d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:17:24,923 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e499d4b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:17:24,923 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:17:24,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7531f2c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:17:24,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67f78a6d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir/,STOPPED} 2024-12-09T05:17:24,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:17:24,949 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-09T05:17:24,956 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=108 (was 100) - Thread LEAK? -, OpenFileDescriptor=464 (was 446) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=127 (was 166), ProcessCount=11 (was 11), AvailableMemoryMB=7859 (was 7996) 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=109, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=127, ProcessCount=11, AvailableMemoryMB=7859 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.log.dir so I do NOT create it in target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/88b6bf0b-f4ba-75d5-0ed5-078ecdd7fbff/hadoop.tmp.dir so I do NOT create it in target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf, deleteOnExit=true 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/test.cache.data in system properties and HBase conf 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:17:24,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T05:17:24,964 DEBUG [Time-limited test {}] fs.HFileSystem(310): The 
file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:17:24,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:17:24,965 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:17:24,965 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:17:24,965 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:17:24,965 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:17:24,978 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:17:25,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:17:25,041 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:17:25,043 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:17:25,043 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:17:25,043 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:17:25,043 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:17:25,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b90cefa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:17:25,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d8a3285{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:17:25,158 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@155b716c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/java.io.tmpdir/jetty-localhost-39081-hadoop-hdfs-3_4_1-tests_jar-_-any-12772934930540417522/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:17:25,159 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@598161d8{HTTP/1.1, (http/1.1)}{localhost:39081} 2024-12-09T05:17:25,159 INFO [Time-limited test {}] server.Server(415): Started @286713ms 2024-12-09T05:17:25,172 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:17:25,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:25,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T05:17:25,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T05:17:25,215 INFO [regionserver/41a709354867:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:17:25,235 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:17:25,239 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:17:25,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:17:25,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:17:25,240 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:17:25,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@575e60d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:17:25,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70ec77da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:17:25,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bd654d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/java.io.tmpdir/jetty-localhost-46529-hadoop-hdfs-3_4_1-tests_jar-_-any-310788175585482740/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:17:25,383 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b3e8d2b{HTTP/1.1, (http/1.1)}{localhost:46529} 2024-12-09T05:17:25,383 INFO [Time-limited test {}] server.Server(415): Started @286937ms 2024-12-09T05:17:25,385 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:17:25,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:17:25,426 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:17:25,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:17:25,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:17:25,431 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:17:25,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@379b4071{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:17:25,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53e8a72b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:17:25,475 WARN [Thread-1700 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data1/current/BP-1944428553-172.17.0.2-1733721444988/current, will proceed with Du for space computation calculation, 2024-12-09T05:17:25,476 WARN [Thread-1701 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data2/current/BP-1944428553-172.17.0.2-1733721444988/current, will proceed with Du for space computation calculation, 2024-12-09T05:17:25,500 WARN [Thread-1679 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:17:25,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8facc03297eefe8e with lease ID 0x864688079be67bc6: Processing first storage report for DS-545cae0b-fd2e-42a6-b0c7-16fb6f65cea7 from datanode DatanodeRegistration(127.0.0.1:45159, datanodeUuid=5b212408-bc5f-40e6-97dd-12b7d946c782, infoPort=36645, infoSecurePort=0, ipcPort=42523, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988) 2024-12-09T05:17:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8facc03297eefe8e with lease ID 0x864688079be67bc6: from storage DS-545cae0b-fd2e-42a6-b0c7-16fb6f65cea7 node DatanodeRegistration(127.0.0.1:45159, datanodeUuid=5b212408-bc5f-40e6-97dd-12b7d946c782, infoPort=36645, infoSecurePort=0, ipcPort=42523, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:17:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8facc03297eefe8e with lease ID 0x864688079be67bc6: Processing first storage report for DS-ab8f82c6-ade7-46ab-9827-22952e4172a6 from datanode DatanodeRegistration(127.0.0.1:45159, datanodeUuid=5b212408-bc5f-40e6-97dd-12b7d946c782, infoPort=36645, infoSecurePort=0, ipcPort=42523, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988) 2024-12-09T05:17:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8facc03297eefe8e with lease ID 0x864688079be67bc6: from storage DS-ab8f82c6-ade7-46ab-9827-22952e4172a6 node DatanodeRegistration(127.0.0.1:45159, datanodeUuid=5b212408-bc5f-40e6-97dd-12b7d946c782, infoPort=36645, infoSecurePort=0, ipcPort=42523, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:17:25,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@541e9d76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/java.io.tmpdir/jetty-localhost-42313-hadoop-hdfs-3_4_1-tests_jar-_-any-14446486463809876078/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:17:25,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f24f78c{HTTP/1.1, (http/1.1)}{localhost:42313} 2024-12-09T05:17:25,550 INFO [Time-limited test {}] server.Server(415): Started @287104ms 2024-12-09T05:17:25,551 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T05:17:25,640 WARN [Thread-1726 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data3/current/BP-1944428553-172.17.0.2-1733721444988/current, will proceed with Du for space computation calculation, 2024-12-09T05:17:25,641 WARN [Thread-1727 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data4/current/BP-1944428553-172.17.0.2-1733721444988/current, will proceed with Du for space computation calculation, 2024-12-09T05:17:25,665 WARN [Thread-1715 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:17:25,667 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc62df26bf8e48c76 with lease ID 0x864688079be67bc7: Processing first storage report for DS-0d19310e-3ca6-4fd3-93d4-b38ff9f8dc3f from datanode DatanodeRegistration(127.0.0.1:40731, datanodeUuid=496194f9-519a-41a1-813a-ac149f2bb4c9, infoPort=33801, infoSecurePort=0, ipcPort=36201, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988) 2024-12-09T05:17:25,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc62df26bf8e48c76 with lease ID 0x864688079be67bc7: from storage DS-0d19310e-3ca6-4fd3-93d4-b38ff9f8dc3f node DatanodeRegistration(127.0.0.1:40731, datanodeUuid=496194f9-519a-41a1-813a-ac149f2bb4c9, infoPort=33801, infoSecurePort=0, ipcPort=36201, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:17:25,668 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc62df26bf8e48c76 with lease ID 0x864688079be67bc7: Processing first storage report for DS-fe347bfd-6be9-4315-9dd8-f678d8f6f40d from datanode DatanodeRegistration(127.0.0.1:40731, datanodeUuid=496194f9-519a-41a1-813a-ac149f2bb4c9, infoPort=33801, infoSecurePort=0, ipcPort=36201, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988) 2024-12-09T05:17:25,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc62df26bf8e48c76 with lease ID 0x864688079be67bc7: from storage DS-fe347bfd-6be9-4315-9dd8-f678d8f6f40d node DatanodeRegistration(127.0.0.1:40731, datanodeUuid=496194f9-519a-41a1-813a-ac149f2bb4c9, infoPort=33801, infoSecurePort=0, ipcPort=36201, storageInfo=lv=-57;cid=testClusterID;nsid=2057726128;c=1733721444988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:17:25,674 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77 2024-12-09T05:17:25,677 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/zookeeper_0, clientPort=59807, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:17:25,678 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59807 2024-12-09T05:17:25,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,679 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:17:25,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:17:25,689 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a with version=8 2024-12-09T05:17:25,689 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase-staging 2024-12-09T05:17:25,691 INFO [Time-limited test {}] client.ConnectionUtils(129): master/41a709354867:0 server-side Connection retries=45 2024-12-09T05:17:25,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:17:25,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:17:25,691 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:17:25,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:17:25,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:17:25,691 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:17:25,692 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:17:25,692 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35729 2024-12-09T05:17:25,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,696 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35729 connecting to ZooKeeper ensemble=127.0.0.1:59807 2024-12-09T05:17:25,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:357290x0, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:17:25,703 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35729-0x10075358e130000 connected 2024-12-09T05:17:25,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:17:25,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:17:25,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:17:25,723 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35729 2024-12-09T05:17:25,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35729 2024-12-09T05:17:25,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35729 2024-12-09T05:17:25,728 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35729 2024-12-09T05:17:25,728 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35729 2024-12-09T05:17:25,729 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a, hbase.cluster.distributed=false 2024-12-09T05:17:25,745 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:17:25,745 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:17:25,746 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39913 2024-12-09T05:17:25,746 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:17:25,747 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:17:25,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,751 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:39913 connecting to ZooKeeper ensemble=127.0.0.1:59807 2024-12-09T05:17:25,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399130x0, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:17:25,754 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39913-0x10075358e130001 connected 2024-12-09T05:17:25,754 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:17:25,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:17:25,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:17:25,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39913 2024-12-09T05:17:25,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39913 2024-12-09T05:17:25,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39913 2024-12-09T05:17:25,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39913 2024-12-09T05:17:25,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39913 2024-12-09T05:17:25,757 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/41a709354867,35729,1733721445690 2024-12-09T05:17:25,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:17:25,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:17:25,761 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/41a709354867,35729,1733721445690 2024-12-09T05:17:25,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:17:25,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:17:25,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,763 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:17:25,763 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/41a709354867,35729,1733721445690 from backup master directory 2024-12-09T05:17:25,764 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:17:25,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/41a709354867,35729,1733721445690 2024-12-09T05:17:25,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:17:25,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:17:25,765 WARN [master/41a709354867:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:17:25,765 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=41a709354867,35729,1733721445690 2024-12-09T05:17:25,770 DEBUG [M:0;41a709354867:35729 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;41a709354867:35729 2024-12-09T05:17:25,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:17:25,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:17:25,780 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/hbase.id with ID: 7c7e4fae-4605-4f69-905b-b9edc3b16429 2024-12-09T05:17:25,789 INFO [master/41a709354867:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:25,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:17:25,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:17:25,802 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:17:25,802 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:17:25,802 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:17:25,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:17:25,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:17:25,810 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store 2024-12-09T05:17:25,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:17:25,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:17:25,817 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:25,817 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:17:25,817 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:25,817 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:25,817 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:17:25,817 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:25,817 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:17:25,817 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:17:25,818 WARN [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/.initializing 2024-12-09T05:17:25,818 DEBUG [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/WALs/41a709354867,35729,1733721445690 2024-12-09T05:17:25,820 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C35729%2C1733721445690, suffix=, logDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/WALs/41a709354867,35729,1733721445690, archiveDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/oldWALs, maxLogs=10 2024-12-09T05:17:25,821 INFO [master/41a709354867:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C35729%2C1733721445690.1733721445820 2024-12-09T05:17:25,825 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/WALs/41a709354867,35729,1733721445690/41a709354867%2C35729%2C1733721445690.1733721445820 2024-12-09T05:17:25,825 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36645:36645),(127.0.0.1/127.0.0.1:33801:33801)] 2024-12-09T05:17:25,825 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:17:25,825 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:25,825 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,825 DEBUG [master/41a709354867:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,828 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:17:25,828 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,828 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:25,828 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:17:25,829 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,830 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:25,830 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:17:25,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:25,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,832 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:17:25,832 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:25,833 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,834 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,835 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:17:25,836 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:17:25,838 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:17:25,839 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708371, jitterRate=-0.09926091134548187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:17:25,839 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:17:25,839 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:17:25,842 DEBUG [master/41a709354867:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b136d7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:17:25,843 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-09T05:17:25,843 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:17:25,843 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:17:25,843 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-09T05:17:25,844 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T05:17:25,844 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-09T05:17:25,844 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:17:25,845 INFO [master/41a709354867:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T05:17:25,846 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:17:25,847 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:17:25,848 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:17:25,848 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:17:25,850 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:17:25,850 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:17:25,851 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:17:25,852 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:17:25,852 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:17:25,853 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:17:25,855 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:17:25,856 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:17:25,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-09T05:17:25,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:17:25,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,858 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=41a709354867,35729,1733721445690, sessionid=0x10075358e130000, setting cluster-up flag (Was=false) 2024-12-09T05:17:25,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,866 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:17:25,867 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,35729,1733721445690 2024-12-09T05:17:25,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:25,875 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:17:25,876 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,35729,1733721445690 2024-12-09T05:17:25,878 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T05:17:25,878 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T05:17:25,878 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, 
TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 41a709354867,35729,1733721445690 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/41a709354867:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:17:25,879 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,880 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:17:25,880 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:17:25,881 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,881 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 
'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:17:25,884 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733721475884 2024-12-09T05:17:25,884 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:17:25,885 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:17:25,886 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:17:25,886 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721445886,5,FailOnTimeoutGroup] 2024-12-09T05:17:25,886 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721445886,5,FailOnTimeoutGroup] 2024-12-09T05:17:25,886 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,886 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:17:25,886 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,886 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T05:17:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:17:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:17:25,890 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T05:17:25,890 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a 2024-12-09T05:17:25,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:17:25,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:17:25,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:25,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:17:25,899 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:17:25,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:25,900 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:17:25,901 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:17:25,901 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:25,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:17:25,902 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:17:25,902 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:25,902 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:25,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740 2024-12-09T05:17:25,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740 2024-12-09T05:17:25,905 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:17:25,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:17:25,908 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:17:25,908 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778639, jitterRate=-0.009910017251968384}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:17:25,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:17:25,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:17:25,908 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:17:25,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:17:25,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:17:25,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:17:25,909 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:17:25,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:17:25,910 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:17:25,910 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T05:17:25,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:17:25,911 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:17:25,912 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:17:25,971 DEBUG [RS:0;41a709354867:39913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;41a709354867:39913 2024-12-09T05:17:25,972 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1008): ClusterId : 7c7e4fae-4605-4f69-905b-b9edc3b16429 2024-12-09T05:17:25,972 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:17:25,974 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:17:25,974 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:17:25,977 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:17:25,977 DEBUG [RS:0;41a709354867:39913 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f5efa78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:17:25,977 DEBUG [RS:0;41a709354867:39913 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5362f138, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:17:25,977 INFO [RS:0;41a709354867:39913 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:17:25,977 INFO [RS:0;41a709354867:39913 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T05:17:25,977 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1090): About to register with Master. 
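[annotation] The two ipc.AbstractRpcClient lines above report connectTO=10000, readTO=20000 and writeTO=60000 for the region server's RPC clients. As a rough illustration only (not something this test sets explicitly), those values are normally driven by the hbase.ipc.client.socket.timeout.* settings; the key names below are my assumption and are not read from this log:

    // Hedged sketch: configuring the RPC client socket timeouts that the log reports.
    // Assumed key names; values mirror the connectTO/readTO/writeTO figures above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcTimeoutConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000); // connectTO
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);    // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);   // writeTO
        return conf;
      }
    }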
2024-12-09T05:17:25,978 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,35729,1733721445690 with isa=41a709354867/172.17.0.2:39913, startcode=1733721445744 2024-12-09T05:17:25,978 DEBUG [RS:0;41a709354867:39913 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:17:25,981 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37625, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:17:25,981 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35729 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,39913,1733721445744 2024-12-09T05:17:25,981 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35729 {}] master.ServerManager(486): Registering regionserver=41a709354867,39913,1733721445744 2024-12-09T05:17:25,983 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a 2024-12-09T05:17:25,983 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39619 2024-12-09T05:17:25,983 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:17:25,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:17:25,985 DEBUG [RS:0;41a709354867:39913 {}] zookeeper.ZKUtil(111): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,39913,1733721445744 2024-12-09T05:17:25,985 WARN [RS:0;41a709354867:39913 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
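[annotation] The znode activity above (watcher set on /hbase/rs/41a709354867,39913,1733721445744, then RegionServerTracker adding the server) is the region server registration handshake through ZooKeeper. A minimal, standalone sketch for inspecting those ephemeral registrations with the plain ZooKeeper client, assuming the test-local quorum 127.0.0.1:59807 seen in the log:

    // Sketch only: list the ephemeral region-server znodes under /hbase/rs.
    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZNodes {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59807", 90000, event -> { });
        try {
          List<String> servers = zk.getChildren("/hbase/rs", false);
          servers.forEach(System.out::println); // e.g. 41a709354867,39913,1733721445744
        } finally {
          zk.close();
        }
      }
    }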
2024-12-09T05:17:25,985 INFO [RS:0;41a709354867:39913 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:17:25,985 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744 2024-12-09T05:17:25,985 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,39913,1733721445744] 2024-12-09T05:17:25,988 DEBUG [RS:0;41a709354867:39913 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:17:25,988 INFO [RS:0;41a709354867:39913 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:17:25,989 INFO [RS:0;41a709354867:39913 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:17:25,990 INFO [RS:0;41a709354867:39913 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:17:25,990 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,990 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:17:25,991 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
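[annotation] The startup lines above fix the WAL provider (FSHLogProvider), the global memstore limits (880 M high mark, 836 M low mark, on-heap) and the compaction throughput bounds (100 MB/s upper, 50 MB/s lower). A hedged sketch of the configuration keys that, to my understanding, control these values; the numbers mirror the log, while the key names are standard HBase settings rather than anything shown in this output:

    // Sketch under stated assumptions: knobs behind the WAL provider, memstore
    // limits and compaction throughput figures reported above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                               // FSHLogProvider
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);             // ~880 M here
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // ~836 M here
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        return conf;
      }
    }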
2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:17:25,991 DEBUG [RS:0;41a709354867:39913 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:17:25,992 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,992 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,992 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,992 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:25,992 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,39913,1733721445744-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T05:17:26,006 INFO [RS:0;41a709354867:39913 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:17:26,006 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,39913,1733721445744-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:26,021 INFO [RS:0;41a709354867:39913 {}] regionserver.Replication(204): 41a709354867,39913,1733721445744 started 2024-12-09T05:17:26,021 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,39913,1733721445744, RpcServer on 41a709354867/172.17.0.2:39913, sessionid=0x10075358e130001 2024-12-09T05:17:26,021 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:17:26,021 DEBUG [RS:0;41a709354867:39913 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,39913,1733721445744 2024-12-09T05:17:26,021 DEBUG [RS:0;41a709354867:39913 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,39913,1733721445744' 2024-12-09T05:17:26,021 DEBUG [RS:0;41a709354867:39913 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:17:26,021 DEBUG [RS:0;41a709354867:39913 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,39913,1733721445744 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,39913,1733721445744' 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:17:26,022 DEBUG [RS:0;41a709354867:39913 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:17:26,023 INFO [RS:0;41a709354867:39913 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:17:26,023 INFO [RS:0;41a709354867:39913 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:17:26,062 WARN [41a709354867:35729 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-09T05:17:26,124 INFO [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C39913%2C1733721445744, suffix=, logDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744, archiveDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/oldWALs, maxLogs=32 2024-12-09T05:17:26,125 INFO [RS:0;41a709354867:39913 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C39913%2C1733721445744.1733721446125 2024-12-09T05:17:26,131 INFO [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721446125 2024-12-09T05:17:26,131 DEBUG [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33801:33801),(127.0.0.1/127.0.0.1:36645:36645)] 2024-12-09T05:17:26,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:26,312 DEBUG [41a709354867:35729 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T05:17:26,312 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:26,313 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,39913,1733721445744, state=OPENING 2024-12-09T05:17:26,315 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:17:26,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:26,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:26,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=41a709354867,39913,1733721445744}] 2024-12-09T05:17:26,318 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:17:26,318 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:17:26,471 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,39913,1733721445744 2024-12-09T05:17:26,471 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:17:26,473 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:17:26,477 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T05:17:26,477 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:17:26,479 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C39913%2C1733721445744.meta, suffix=.meta, logDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744, archiveDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/oldWALs, maxLogs=32 2024-12-09T05:17:26,479 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C39913%2C1733721445744.meta.1733721446479.meta 2024-12-09T05:17:26,489 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.meta.1733721446479.meta 2024-12-09T05:17:26,489 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36645:36645),(127.0.0.1/127.0.0.1:33801:33801)] 2024-12-09T05:17:26,489 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:17:26,489 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:17:26,490 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:17:26,490 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T05:17:26,490 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:17:26,490 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:26,490 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T05:17:26,490 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T05:17:26,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:17:26,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:17:26,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
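[annotation] The store-opening lines above repeat the per-family cache flags (cacheDataOnRead=true, cacheDataOnWrite=false, prefetchOnOpen=false) and the schema attributes of the hbase:meta 'info' family (IN_MEMORY, ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks). For illustration only, this is roughly how such a family is described through the public ColumnFamilyDescriptorBuilder API; it is a sketch, not the code that built hbase:meta here:

    // Hedged sketch mirroring the 'info' family attributes logged above.
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                              // VERSIONS => '3'
            .setInMemory(true)                              // IN_MEMORY => 'true'
            .setBlockCacheEnabled(true)                     // BLOCKCACHE => 'true'
            .setBlocksize(8 * 1024)                         // BLOCKSIZE => '8192'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setCacheDataOnWrite(false)                     // cacheDataOnWrite=false above
            .setPrefetchBlocksOnOpen(false)                 // prefetchOnOpen=false above
            .build();
      }
    }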
2024-12-09T05:17:26,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:26,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:17:26,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:17:26,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:26,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:26,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:17:26,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:17:26,495 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:26,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:17:26,496 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740 2024-12-09T05:17:26,497 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740 2024-12-09T05:17:26,498 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:17:26,499 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:17:26,500 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875802, jitterRate=0.11364030838012695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:17:26,500 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:17:26,501 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733721446471 2024-12-09T05:17:26,503 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:17:26,503 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T05:17:26,503 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:26,504 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,39913,1733721445744, state=OPEN 2024-12-09T05:17:26,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:17:26,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:17:26,509 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:17:26,509 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:17:26,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:17:26,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=41a709354867,39913,1733721445744 in 191 msec 2024-12-09T05:17:26,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:17:26,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-12-09T05:17:26,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 636 msec 2024-12-09T05:17:26,514 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733721446514, completionTime=-1 2024-12-09T05:17:26,514 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T05:17:26,514 DEBUG [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T05:17:26,515 DEBUG [hconnection-0x789a68fc-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:17:26,516 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:17:26,517 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T05:17:26,517 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733721506517 2024-12-09T05:17:26,517 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733721566517 2024-12-09T05:17:26,517 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-09T05:17:26,523 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,35729,1733721445690-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:26,523 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,35729,1733721445690-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:26,523 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,35729,1733721445690-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:26,523 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-41a709354867:35729, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:26,523 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:17:26,523 INFO [master/41a709354867:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-09T05:17:26,524 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:17:26,525 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T05:17:26,525 DEBUG [master/41a709354867:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T05:17:26,526 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:17:26,526 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:26,527 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:17:26,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:17:26,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:17:26,538 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4c92af42adee675c0deec3b68367073c, NAME => 'hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a 2024-12-09T05:17:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:17:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:17:26,546 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:26,546 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4c92af42adee675c0deec3b68367073c, disabling compactions & flushes 2024-12-09T05:17:26,546 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:17:26,546 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:17:26,546 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. after waiting 0 ms 2024-12-09T05:17:26,546 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:17:26,546 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:17:26,546 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4c92af42adee675c0deec3b68367073c: 2024-12-09T05:17:26,547 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:17:26,547 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733721446547"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721446547"}]},"ts":"1733721446547"} 2024-12-09T05:17:26,549 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T05:17:26,550 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:17:26,550 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721446550"}]},"ts":"1733721446550"} 2024-12-09T05:17:26,551 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T05:17:26,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4c92af42adee675c0deec3b68367073c, ASSIGN}] 2024-12-09T05:17:26,556 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4c92af42adee675c0deec3b68367073c, ASSIGN 2024-12-09T05:17:26,557 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4c92af42adee675c0deec3b68367073c, ASSIGN; state=OFFLINE, location=41a709354867,39913,1733721445744; forceNewPlan=false, retain=false 2024-12-09T05:17:26,707 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4c92af42adee675c0deec3b68367073c, regionState=OPENING, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:26,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4c92af42adee675c0deec3b68367073c, server=41a709354867,39913,1733721445744}] 2024-12-09T05:17:26,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,39913,1733721445744 2024-12-09T05:17:26,865 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:17:26,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4c92af42adee675c0deec3b68367073c, NAME => 'hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:17:26,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:26,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,867 INFO [StoreOpener-4c92af42adee675c0deec3b68367073c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,868 INFO [StoreOpener-4c92af42adee675c0deec3b68367073c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c92af42adee675c0deec3b68367073c columnFamilyName info 2024-12-09T05:17:26,868 DEBUG [StoreOpener-4c92af42adee675c0deec3b68367073c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:26,869 INFO [StoreOpener-4c92af42adee675c0deec3b68367073c-1 {}] regionserver.HStore(327): Store=4c92af42adee675c0deec3b68367073c/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:26,870 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,870 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4c92af42adee675c0deec3b68367073c 2024-12-09T05:17:26,873 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:17:26,874 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4c92af42adee675c0deec3b68367073c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858682, jitterRate=0.09187129139900208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:17:26,874 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4c92af42adee675c0deec3b68367073c: 2024-12-09T05:17:26,875 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c., pid=6, masterSystemTime=1733721446861 2024-12-09T05:17:26,876 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:17:26,876 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 
2024-12-09T05:17:26,877 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4c92af42adee675c0deec3b68367073c, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:26,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:17:26,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4c92af42adee675c0deec3b68367073c, server=41a709354867,39913,1733721445744 in 169 msec 2024-12-09T05:17:26,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:17:26,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4c92af42adee675c0deec3b68367073c, ASSIGN in 325 msec 2024-12-09T05:17:26,882 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:17:26,882 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721446882"}]},"ts":"1733721446882"} 2024-12-09T05:17:26,884 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T05:17:26,886 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:17:26,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 362 msec 2024-12-09T05:17:26,926 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T05:17:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:17:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:17:26,932 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T05:17:26,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:17:26,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 10 msec 2024-12-09T05:17:26,954 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T05:17:26,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:17:26,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 10 msec 2024-12-09T05:17:26,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T05:17:26,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.216sec 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,35729,1733721445690-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:17:26,981 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,35729,1733721445690-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:17:26,983 DEBUG [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:17:26,983 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:17:26,983 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,35729,1733721445690-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:17:27,060 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0245eb78 to 127.0.0.1:59807 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e014bc 2024-12-09T05:17:27,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28fc516b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:17:27,065 DEBUG [hconnection-0x325b3c60-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:17:27,066 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:17:27,068 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=41a709354867,35729,1733721445690 2024-12-09T05:17:27,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:17:27,070 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-09T05:17:27,071 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:17:27,072 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:17:27,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T05:17:27,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
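The two TableDescriptorChecker warnings above flag MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) as unusually small; for a log-rolling test such values are typically chosen deliberately to force frequent flushes and splits. A minimal sketch of one way to set them on a table descriptor follows, assuming the standard HBase 2.x builder API; the values are taken from the warnings, but they could equally have come from hbase-site.xml overrides, and this is not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallRegionDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
            .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .build();
      }
    }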
2024-12-09T05:17:27,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:17:27,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T05:17:27,075 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:17:27,076 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:27,076 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9 2024-12-09T05:17:27,076 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:17:27,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:17:27,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741837_1013 (size=381) 2024-12-09T05:17:27,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741837_1013 (size=381) 2024-12-09T05:17:27,088 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 850d12d27f942676099898eccdc0c98c, NAME => 'TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a 2024-12-09T05:17:27,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741838_1014 (size=64) 2024-12-09T05:17:27,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741838_1014 (size=64) 2024-12-09T05:17:27,096 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated 
TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:27,097 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 850d12d27f942676099898eccdc0c98c, disabling compactions & flushes 2024-12-09T05:17:27,097 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:27,097 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:27,097 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. after waiting 0 ms 2024-12-09T05:17:27,097 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:27,097 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:27,097 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:27,098 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:17:27,098 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733721447098"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721447098"}]},"ts":"1733721447098"} 2024-12-09T05:17:27,100 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
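The entries above record the master-side handling of a create-table request (CreateTableProcedure pid=9) for TestLogRolling-testLogRolling with a single 'info' family. A minimal client-side sketch that would produce an equivalent request is shown below; it is illustrative only, since the test drives this through its own utility code rather than a hand-written main(), and only VERSIONS and BLOCKSIZE are carried over from the descriptor logged above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateLogRollingTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)    // VERSIONS => '1' in the descriptor above
                  .setBlocksize(65536)  // BLOCKSIZE => '65536' in the descriptor above
                  .build())
              .build());
          // createTable blocks until the procedure completes, which matches the
          // repeated "Checking to see if procedure is done pid=9" entries and the
          // final "Operation: CREATE ... procId: 9 completed" entry further below.
        }
      }
    }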
2024-12-09T05:17:27,101 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:17:27,101 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721447101"}]},"ts":"1733721447101"} 2024-12-09T05:17:27,102 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-09T05:17:27,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, ASSIGN}] 2024-12-09T05:17:27,107 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, ASSIGN 2024-12-09T05:17:27,107 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, ASSIGN; state=OFFLINE, location=41a709354867,39913,1733721445744; forceNewPlan=false, retain=false 2024-12-09T05:17:27,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:27,258 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=850d12d27f942676099898eccdc0c98c, regionState=OPENING, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:27,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 850d12d27f942676099898eccdc0c98c, server=41a709354867,39913,1733721445744}] 2024-12-09T05:17:27,412 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 41a709354867,39913,1733721445744 2024-12-09T05:17:27,415 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:27,416 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 850d12d27f942676099898eccdc0c98c, NAME => 'TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:17:27,416 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,416 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:27,416 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,416 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,417 INFO [StoreOpener-850d12d27f942676099898eccdc0c98c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,419 INFO [StoreOpener-850d12d27f942676099898eccdc0c98c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, 
compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 850d12d27f942676099898eccdc0c98c columnFamilyName info 2024-12-09T05:17:27,419 DEBUG [StoreOpener-850d12d27f942676099898eccdc0c98c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:27,419 INFO [StoreOpener-850d12d27f942676099898eccdc0c98c-1 {}] regionserver.HStore(327): Store=850d12d27f942676099898eccdc0c98c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:27,420 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,420 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,422 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:27,424 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:17:27,424 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 850d12d27f942676099898eccdc0c98c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780099, jitterRate=-0.008053362369537354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:17:27,425 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:27,426 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c., pid=11, masterSystemTime=1733721447412 2024-12-09T05:17:27,427 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:27,427 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 
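With region 850d12d27f942676099898eccdc0c98c now open, the later entries show puts to keys like row0001 in family 'info' filling the 8 KB memstore until a flush of 7 entries at sequenceid=11 is triggered. A minimal sketch of writes of that shape follows; the row naming and value size are inferred from the flush entries below (cells around 1080 bytes), and this is not the test's actual write loop.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] value = new byte[1024]; // sized so each cell is on the order of the 1080-byte cells flushed below
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          for (int i = 1; i <= 7; i++) { // 7 entries are flushed at sequenceid=11 below
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value); // empty qualifier, as in "row0001/info:/"
            table.put(put);
          }
        }
      }
    }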
2024-12-09T05:17:27,428 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=850d12d27f942676099898eccdc0c98c, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:27,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T05:17:27,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 850d12d27f942676099898eccdc0c98c, server=41a709354867,39913,1733721445744 in 170 msec 2024-12-09T05:17:27,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T05:17:27,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, ASSIGN in 325 msec 2024-12-09T05:17:27,434 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:17:27,434 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721447434"}]},"ts":"1733721447434"} 2024-12-09T05:17:27,435 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-09T05:17:27,438 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:17:27,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 364 msec 2024-12-09T05:17:28,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:29,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:29,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:29,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,003 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:17:30,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:30,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:31,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:31,988 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T05:17:31,989 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-09T05:17:31,989 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-09T05:17:32,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:33,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:34,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:35,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T05:17:35,188 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T05:17:35,189 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T05:17:35,189 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T05:17:35,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:36,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35729 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T05:17:37,078 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed 2024-12-09T05:17:37,080 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-09T05:17:37,080 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 
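The entries that follow include a RegionTooBusyException ("Over memstore limit=32.0 K"): the region blocks updates once its memstore exceeds the flush size (8192 here) times hbase.hregion.memstore.block.multiplier, and 32 K is consistent with the default multiplier of 4. Whether the exception reaches user code depends on the client retry configuration, which mini-cluster tests often keep low; a hedged sketch of explicit handling is shown below, illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionHandlingSketch {
      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // Memstore is above the blocking threshold; wait for the flush
            // (logged below as "Finished flush of dataSize ~7.36 KB ...") to drain it.
            Thread.sleep(200L * (attempt + 1));
          }
        }
        throw new IOException("region still too busy after retries");
      }
    }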
2024-12-09T05:17:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:37,091 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:17:37,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/2de9308b3fb2412fb7954e02f1f080e1 is 1080, key is row0001/info:/1733721457083/Put/seqid=0 2024-12-09T05:17:37,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=850d12d27f942676099898eccdc0c98c, server=41a709354867,39913,1733721445744 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T05:17:37,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50972 deadline: 1733721467115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=850d12d27f942676099898eccdc0c98c, server=41a709354867,39913,1733721445744 2024-12-09T05:17:37,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741839_1015 (size=12509) 2024-12-09T05:17:37,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741839_1015 (size=12509) 2024-12-09T05:17:37,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/2de9308b3fb2412fb7954e02f1f080e1 2024-12-09T05:17:37,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/2de9308b3fb2412fb7954e02f1f080e1 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/2de9308b3fb2412fb7954e02f1f080e1 2024-12-09T05:17:37,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/2de9308b3fb2412fb7954e02f1f080e1, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T05:17:37,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 850d12d27f942676099898eccdc0c98c in 41ms, sequenceid=11, compaction requested=false 2024-12-09T05:17:37,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:37,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:38,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:38,995 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 688aa77f4712fd33e61f733d63bfbd0a, had cached 0 bytes from a total of 23930 2024-12-09T05:17:39,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:40,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:41,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:42,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:43,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:44,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:45,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:45,579 DEBUG [master/41a709354867:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=14, reused chunk count=37, reuseRatio=72.55% 2024-12-09T05:17:45,579 DEBUG [master/41a709354867:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T05:17:46,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:47,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:47,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T05:17:47,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/951105ea527d4429a38a32e5e4f1bddf is 1080, key is row0008/info:/1733721457092/Put/seqid=0 2024-12-09T05:17:47,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741840_1016 (size=29761) 2024-12-09T05:17:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741840_1016 (size=29761) 2024-12-09T05:17:47,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/951105ea527d4429a38a32e5e4f1bddf 2024-12-09T05:17:47,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/951105ea527d4429a38a32e5e4f1bddf as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf 2024-12-09T05:17:47,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf, entries=23, sequenceid=37, filesize=29.1 K 2024-12-09T05:17:47,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 850d12d27f942676099898eccdc0c98c in 23ms, sequenceid=37, compaction requested=false 2024-12-09T05:17:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:47,180 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K 2024-12-09T05:17:47,180 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:47,180 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf because midkey is the same as first or last row 2024-12-09T05:17:47,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:48,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:49,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:49,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:17:49,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/069733266a1c4eb4aabffb56479d945f is 1080, key is row0031/info:/1733721467157/Put/seqid=0 2024-12-09T05:17:49,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741841_1017 (size=12509) 2024-12-09T05:17:49,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741841_1017 (size=12509) 2024-12-09T05:17:49,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/069733266a1c4eb4aabffb56479d945f 2024-12-09T05:17:49,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/069733266a1c4eb4aabffb56479d945f as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/069733266a1c4eb4aabffb56479d945f 2024-12-09T05:17:49,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/069733266a1c4eb4aabffb56479d945f, entries=7, sequenceid=47, filesize=12.2 K 2024-12-09T05:17:49,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 850d12d27f942676099898eccdc0c98c in 25ms, sequenceid=47, compaction requested=true 2024-12-09T05:17:49,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:49,191 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should 
split because info size=53.5 K, sizeToCheck=16.0 K 2024-12-09T05:17:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf because midkey is the same as first or last row 2024-12-09T05:17:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 850d12d27f942676099898eccdc0c98c:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:17:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:49,192 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:17:49,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:49,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T05:17:49,194 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:17:49,194 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): 850d12d27f942676099898eccdc0c98c/info is initiating minor compaction (all files) 2024-12-09T05:17:49,194 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 850d12d27f942676099898eccdc0c98c/info in TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 
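The records above show the short-compaction thread picking a minor compaction: 3 store files eligible, and the exploring policy settles on all 3 (54779 bytes) after considering 1 permutation. As a rough illustration only — a simplified size-ratio selection sketch with the hypothetical class RatioSelectionSketch and its own tie-breaking, not HBase's actual ExploringCompactionPolicy — the idea is to scan contiguous windows of store-file sizes and keep a window in which no file is larger than the rest of the window times a ratio:

import java.util.ArrayList;
import java.util.List;

public class RatioSelectionSketch {
    // Pick a contiguous window of file sizes in which every file is at most `ratio` times
    // the combined size of the other files in the window. Simplified stand-in for an
    // exploring-style minor compaction selection; the real policy's tie-breaking differs.
    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = 0;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                List<Long> window = sizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                boolean ratioOk = window.stream().allMatch(s -> s <= (total - s) * ratio);
                if (ratioOk && total > bestTotal) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // The three flushed files from this region: 12.2 K, 29.1 K and 12.2 K (sizes in bytes).
        System.out.println(select(List.of(12509L, 29761L, 12509L), 2, 10, 1.2));
    }
}

With the three file sizes from this log (12509 + 29761 + 12509), the sketch likewise ends up selecting all three files, 54779 bytes in total.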
2024-12-09T05:17:49,194 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/2de9308b3fb2412fb7954e02f1f080e1, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/069733266a1c4eb4aabffb56479d945f] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp, totalSize=53.5 K 2024-12-09T05:17:49,195 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2de9308b3fb2412fb7954e02f1f080e1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733721457083 2024-12-09T05:17:49,195 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 951105ea527d4429a38a32e5e4f1bddf, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733721457092 2024-12-09T05:17:49,196 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 069733266a1c4eb4aabffb56479d945f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733721467157 2024-12-09T05:17:49,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/5e1e29e4c72c4752aac1e2069006f9a6 is 1080, key is row0038/info:/1733721469166/Put/seqid=0 2024-12-09T05:17:49,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:49,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741842_1018 (size=29761) 2024-12-09T05:17:49,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741842_1018 (size=29761) 2024-12-09T05:17:49,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/5e1e29e4c72c4752aac1e2069006f9a6 2024-12-09T05:17:49,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/5e1e29e4c72c4752aac1e2069006f9a6 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/5e1e29e4c72c4752aac1e2069006f9a6 2024-12-09T05:17:49,215 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 850d12d27f942676099898eccdc0c98c#info#compaction#43 average throughput is 9.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:17:49,216 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/22d4cb463ae34144aff702049f54816c is 1080, key is row0001/info:/1733721457083/Put/seqid=0 2024-12-09T05:17:49,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/5e1e29e4c72c4752aac1e2069006f9a6, entries=23, sequenceid=73, filesize=29.1 K 2024-12-09T05:17:49,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741843_1019 (size=44978) 2024-12-09T05:17:49,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for 850d12d27f942676099898eccdc0c98c in 29ms, sequenceid=73, compaction requested=false 2024-12-09T05:17:49,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741843_1019 (size=44978) 2024-12-09T05:17:49,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:49,222 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=82.6 K, sizeToCheck=16.0 K 2024-12-09T05:17:49,222 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:49,222 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf because midkey is the same as first or last row 2024-12-09T05:17:49,230 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/22d4cb463ae34144aff702049f54816c as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c 2024-12-09T05:17:49,236 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 850d12d27f942676099898eccdc0c98c/info of 850d12d27f942676099898eccdc0c98c into 22d4cb463ae34144aff702049f54816c(size=43.9 K), total size for store is 73.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
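Around each flush the flusher runs the same two checks that recur throughout this log: "Should split because info size=…, sizeToCheck=16.0 K" followed by "cannot split … because midkey is the same as first or last row". A minimal sketch of that two-stage decision, using hypothetical names (SplitCheckSketch, shouldSplit, splitPoint) rather than the real ConstantSizeRegionSplitPolicy/StoreUtils code:

import java.util.Optional;

public class SplitCheckSketch {
    // Stage 1: the store is only a split candidate once it exceeds the size threshold.
    static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
        return storeSizeBytes > sizeToCheckBytes;
    }

    // Stage 2: the split point comes from the largest file's midkey, and it must fall strictly
    // inside the file's key range; if it equals the first or last row, the split is skipped.
    static Optional<String> splitPoint(String firstKey, String midKey, String lastKey) {
        if (midKey.equals(firstKey) || midKey.equals(lastKey)) {
            return Optional.empty();   // "cannot split ... midkey is the same as first or last row"
        }
        return Optional.of(midKey);
    }

    public static void main(String[] args) {
        System.out.println(shouldSplit(84_582, 16_384));                  // ~82.6 K > 16.0 K -> true
        System.out.println(splitPoint("row0001", "row0001", "row0030"));  // Optional.empty: no split yet
        System.out.println(splitPoint("row0001", "row0062", "row0096"));  // Optional[row0062]
    }
}

So the region keeps reporting "should split" after every flush but only splits further down, once a compacted file yields row0062 as a usable midkey.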
2024-12-09T05:17:49,236 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:49,236 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c., storeName=850d12d27f942676099898eccdc0c98c/info, priority=13, startTime=1733721469192; duration=0sec 2024-12-09T05:17:49,236 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=73.0 K, sizeToCheck=16.0 K 2024-12-09T05:17:49,236 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:49,236 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c because midkey is the same as first or last row 2024-12-09T05:17:49,236 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:49,236 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 850d12d27f942676099898eccdc0c98c:info 2024-12-09T05:17:50,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:51,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:17:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:17:51,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/bcf239746ad94f73aa6c284e19053c50 is 1080, key is row0061/info:/1733721469193/Put/seqid=0 2024-12-09T05:17:51,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741844_1020 (size=12509) 2024-12-09T05:17:51,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741844_1020 (size=12509) 2024-12-09T05:17:51,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/bcf239746ad94f73aa6c284e19053c50 2024-12-09T05:17:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/bcf239746ad94f73aa6c284e19053c50 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/bcf239746ad94f73aa6c284e19053c50 2024-12-09T05:17:51,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/bcf239746ad94f73aa6c284e19053c50, entries=7, sequenceid=84, filesize=12.2 K 2024-12-09T05:17:51,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 850d12d27f942676099898eccdc0c98c in 24ms, sequenceid=84, compaction requested=true 2024-12-09T05:17:51,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:51,227 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=85.2 K, sizeToCheck=16.0 K 2024-12-09T05:17:51,227 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:51,227 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c because midkey is the same as first or last row 2024-12-09T05:17:51,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 850d12d27f942676099898eccdc0c98c:info, priority=-2147483648, current under compaction store size is 1 
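The flush cycle that repeats throughout this section (flush requested → memstore written to a .tmp HFile → "Committing .tmp/<file> as info/<file>" → "Added …, entries=N, sequenceid=S") is essentially: snapshot the memstore, write the snapshot to a temporary file, then move it into the store directory so readers only ever see complete files. A very rough sketch of that pattern under those assumptions — hypothetical FlushSketch class, plain text files instead of HFiles, not the HRegion/HStore implementation:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.TreeMap;

public class FlushSketch {
    private final TreeMap<String, String> memstore = new TreeMap<>(); // sorted in-memory edits
    private final Path tmpDir;    // stands in for .../850d.../.tmp
    private final Path storeDir;  // stands in for .../850d.../info

    FlushSketch(Path tmpDir, Path storeDir) { this.tmpDir = tmpDir; this.storeDir = storeDir; }

    void put(String row, String value) { memstore.put(row, value); }

    // Snapshot, write the snapshot to a temp file, then commit it into the store directory.
    Path flush(String fileName) throws IOException {
        TreeMap<String, String> snapshot = new TreeMap<>(memstore);
        memstore.clear();                                  // new writes go to a fresh memstore
        Path tmp = tmpDir.resolve(fileName);
        StringBuilder sb = new StringBuilder();
        snapshot.forEach((k, v) -> sb.append(k).append('=').append(v).append('\n'));
        Files.writeString(tmp, sb.toString());             // ".tmp/<file>"
        Path committed = storeDir.resolve(fileName);       // "info/<file>"
        return Files.move(tmp, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path base = Files.createTempDirectory("flush-sketch");
        FlushSketch region = new FlushSketch(
            Files.createDirectories(base.resolve(".tmp")),
            Files.createDirectories(base.resolve("info")));
        region.put("row0061", "value");
        region.put("row0067", "value");
        System.out.println("committed " + region.flush("example-flush-file"));
    }
}

Committing by rename is what makes a crashed flush leave at most an orphan file in .tmp rather than a half-written file in the store.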
2024-12-09T05:17:51,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:51,227 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:17:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-09T05:17:51,228 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87248 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:17:51,228 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): 850d12d27f942676099898eccdc0c98c/info is initiating minor compaction (all files) 2024-12-09T05:17:51,228 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 850d12d27f942676099898eccdc0c98c/info in TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:51,228 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/5e1e29e4c72c4752aac1e2069006f9a6, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/bcf239746ad94f73aa6c284e19053c50] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp, totalSize=85.2 K 2024-12-09T05:17:51,229 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22d4cb463ae34144aff702049f54816c, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733721457083 2024-12-09T05:17:51,229 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e1e29e4c72c4752aac1e2069006f9a6, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733721469166 2024-12-09T05:17:51,230 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcf239746ad94f73aa6c284e19053c50, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733721469193 2024-12-09T05:17:51,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/80cfdf26267541f59f4e378c480e89b2 is 1080, key is row0068/info:/1733721471203/Put/seqid=0 
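The compaction selected just above will rewrite 22d4cb463ae34144aff702049f54816c, 5e1e29e4c72c4752aac1e2069006f9a6 and bcf239746ad94f73aa6c284e19053c50 (85.2 K in total) into a single new file. Conceptually that is a merge of several sorted files in which, for a given row, the entry from the file with the higher sequence id wins. A toy sketch of only that merge idea (hypothetical CompactionMergeSketch; the real Compactor streams cells through scanners instead of materializing maps):

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class CompactionMergeSketch {
    // Merge sorted row->value maps, oldest file first; later (higher-sequence) files
    // overwrite older values for the same row. The merged result replaces all the inputs.
    static TreeMap<String, String> merge(List<TreeMap<String, String>> filesOldestFirst) {
        TreeMap<String, String> merged = new TreeMap<>();
        for (TreeMap<String, String> file : filesOldestFirst) {
            merged.putAll(file);
        }
        return merged;
    }

    public static void main(String[] args) {
        TreeMap<String, String> seq47 = new TreeMap<>(Map.of("row0001", "a", "row0037", "b"));
        TreeMap<String, String> seq73 = new TreeMap<>(Map.of("row0038", "c", "row0060", "d"));
        TreeMap<String, String> seq84 = new TreeMap<>(Map.of("row0001", "a2", "row0061", "e"));
        System.out.println(merge(List.of(seq47, seq73, seq84)));
        // {row0001=a2, row0037=b, row0038=c, row0060=d, row0061=e}
    }
}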
2024-12-09T05:17:51,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741845_1021 (size=28684) 2024-12-09T05:17:51,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741845_1021 (size=28684) 2024-12-09T05:17:51,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=109 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/80cfdf26267541f59f4e378c480e89b2 2024-12-09T05:17:51,246 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 850d12d27f942676099898eccdc0c98c#info#compaction#46 average throughput is 22.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:17:51,247 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/7618143e5ddf4f6daaa0ae81a28bba14 is 1080, key is row0001/info:/1733721457083/Put/seqid=0 2024-12-09T05:17:51,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/80cfdf26267541f59f4e378c480e89b2 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/80cfdf26267541f59f4e378c480e89b2 2024-12-09T05:17:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741846_1022 (size=77532) 2024-12-09T05:17:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741846_1022 (size=77532) 2024-12-09T05:17:51,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/80cfdf26267541f59f4e378c480e89b2, entries=22, sequenceid=109, filesize=28.0 K 2024-12-09T05:17:51,257 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/7618143e5ddf4f6daaa0ae81a28bba14 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14 2024-12-09T05:17:51,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=7.36 KB/7532 for 850d12d27f942676099898eccdc0c98c in 31ms, sequenceid=109, compaction requested=false 2024-12-09T05:17:51,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:51,258 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=113.2 K, sizeToCheck=16.0 K 2024-12-09T05:17:51,258 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:51,258 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c because midkey is the same as first or last row 2024-12-09T05:17:51,262 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 850d12d27f942676099898eccdc0c98c/info of 850d12d27f942676099898eccdc0c98c into 7618143e5ddf4f6daaa0ae81a28bba14(size=75.7 K), total size for store is 103.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T05:17:51,262 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:51,263 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c., storeName=850d12d27f942676099898eccdc0c98c/info, priority=13, startTime=1733721471227; duration=0sec 2024-12-09T05:17:51,263 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=103.7 K, sizeToCheck=16.0 K 2024-12-09T05:17:51,263 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T05:17:51,264 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:51,264 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:51,264 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 850d12d27f942676099898eccdc0c98c:info 2024-12-09T05:17:51,265 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35729 {}] assignment.AssignmentManager(1346): Split request from 41a709354867,39913,1733721445744, parent={ENCODED => 850d12d27f942676099898eccdc0c98c, NAME => 'TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T05:17:51,271 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35729 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=41a709354867,39913,1733721445744 2024-12-09T05:17:51,275 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35729 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=850d12d27f942676099898eccdc0c98c, daughterA=35e9bfca82c3f49a16c93d34a671827f, daughterB=b7fac5053b0b382751073fe7134f7b9c 
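With the store past the threshold and a usable split point available, the region server asks the master to split: parent 850d12d27f942676099898eccdc0c98c (STARTKEY '' to ENDKEY ''), splitKey=row0062, daughters 35e9bfca82c3f49a16c93d34a671827f and b7fac5053b0b382751073fe7134f7b9c. The key-range arithmetic behind such a request is simple; here is a small sketch with hypothetical record types (RegionRange, SplitRequest), not the AssignmentManager/SplitTableRegionProcedure API:

public class SplitRequestSketch {
    // A region covers [startKey, endKey); empty strings mean the table's open ends.
    record RegionRange(String name, String startKey, String endKey) { }

    record SplitRequest(RegionRange parent, String splitKey) {
        RegionRange daughterA(String name) { return new RegionRange(name, parent.startKey(), splitKey); }
        RegionRange daughterB(String name) { return new RegionRange(name, splitKey, parent.endKey()); }
    }

    public static void main(String[] args) {
        RegionRange parent = new RegionRange("850d12d27f942676099898eccdc0c98c", "", "");
        SplitRequest req = new SplitRequest(parent, "row0062");
        System.out.println(req.daughterA("35e9bfca82c3f49a16c93d34a671827f")); // covers ["", "row0062")
        System.out.println(req.daughterB("b7fac5053b0b382751073fe7134f7b9c")); // covers ["row0062", "")
    }
}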
2024-12-09T05:17:51,277 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=850d12d27f942676099898eccdc0c98c, daughterA=35e9bfca82c3f49a16c93d34a671827f, daughterB=b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,277 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=850d12d27f942676099898eccdc0c98c, daughterA=35e9bfca82c3f49a16c93d34a671827f, daughterB=b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,277 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=850d12d27f942676099898eccdc0c98c, daughterA=35e9bfca82c3f49a16c93d34a671827f, daughterB=b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, UNASSIGN}] 2024-12-09T05:17:51,284 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, UNASSIGN 2024-12-09T05:17:51,285 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=850d12d27f942676099898eccdc0c98c, regionState=CLOSING, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:51,287 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T05:17:51,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure 850d12d27f942676099898eccdc0c98c, server=41a709354867,39913,1733721445744}] 2024-12-09T05:17:51,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,39913,1733721445744 2024-12-09T05:17:51,444 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,444 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-09T05:17:51,445 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing 850d12d27f942676099898eccdc0c98c, disabling compactions & flushes 2024-12-09T05:17:51,445 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:51,445 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 
2024-12-09T05:17:51,445 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. after waiting 0 ms 2024-12-09T05:17:51,445 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 2024-12-09T05:17:51,445 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing 850d12d27f942676099898eccdc0c98c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:17:51,449 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/cb44807ff99e46cf9dac7142fd5f408b is 1080, key is row0090/info:/1733721471228/Put/seqid=0 2024-12-09T05:17:51,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741847_1023 (size=12509) 2024-12-09T05:17:51,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741847_1023 (size=12509) 2024-12-09T05:17:51,458 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/cb44807ff99e46cf9dac7142fd5f408b 2024-12-09T05:17:51,464 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/.tmp/info/cb44807ff99e46cf9dac7142fd5f408b as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/cb44807ff99e46cf9dac7142fd5f408b 2024-12-09T05:17:51,469 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/cb44807ff99e46cf9dac7142fd5f408b, entries=7, sequenceid=120, filesize=12.2 K 2024-12-09T05:17:51,470 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 850d12d27f942676099898eccdc0c98c in 24ms, sequenceid=120, compaction requested=true 2024-12-09T05:17:51,471 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/2de9308b3fb2412fb7954e02f1f080e1, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/069733266a1c4eb4aabffb56479d945f, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/5e1e29e4c72c4752aac1e2069006f9a6, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/bcf239746ad94f73aa6c284e19053c50] to archive 2024-12-09T05:17:51,471 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T05:17:51,473 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/2de9308b3fb2412fb7954e02f1f080e1 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/2de9308b3fb2412fb7954e02f1f080e1 2024-12-09T05:17:51,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/951105ea527d4429a38a32e5e4f1bddf 2024-12-09T05:17:51,475 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/22d4cb463ae34144aff702049f54816c 2024-12-09T05:17:51,476 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/069733266a1c4eb4aabffb56479d945f to 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/069733266a1c4eb4aabffb56479d945f 2024-12-09T05:17:51,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/5e1e29e4c72c4752aac1e2069006f9a6 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/5e1e29e4c72c4752aac1e2069006f9a6 2024-12-09T05:17:51,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/bcf239746ad94f73aa6c284e19053c50 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/bcf239746ad94f73aa6c284e19053c50 2024-12-09T05:17:51,483 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=1 2024-12-09T05:17:51,484 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. 
2024-12-09T05:17:51,484 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for 850d12d27f942676099898eccdc0c98c: 2024-12-09T05:17:51,486 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,486 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=850d12d27f942676099898eccdc0c98c, regionState=CLOSED 2024-12-09T05:17:51,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13 2024-12-09T05:17:51,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure 850d12d27f942676099898eccdc0c98c, server=41a709354867,39913,1733721445744 in 201 msec 2024-12-09T05:17:51,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-09T05:17:51,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=850d12d27f942676099898eccdc0c98c, UNASSIGN in 207 msec 2024-12-09T05:17:51,512 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:51,514 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 3 storefiles, region=850d12d27f942676099898eccdc0c98c, threads=3 2024-12-09T05:17:51,514 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14 for region: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,515 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/80cfdf26267541f59f4e378c480e89b2 for region: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,515 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/cb44807ff99e46cf9dac7142fd5f408b for region: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,525 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/80cfdf26267541f59f4e378c480e89b2, top=true 2024-12-09T05:17:51,525 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/cb44807ff99e46cf9dac7142fd5f408b, top=true 2024-12-09T05:17:51,530 INFO 
[StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2 for child: b7fac5053b0b382751073fe7134f7b9c, parent: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,530 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/80cfdf26267541f59f4e378c480e89b2 for region: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,533 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b for child: b7fac5053b0b382751073fe7134f7b9c, parent: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,533 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/cb44807ff99e46cf9dac7142fd5f408b for region: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741848_1024 (size=27) 2024-12-09T05:17:51,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741848_1024 (size=27) 2024-12-09T05:17:51,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741849_1025 (size=27) 2024-12-09T05:17:51,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741849_1025 (size=27) 2024-12-09T05:17:51,546 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14 for region: 850d12d27f942676099898eccdc0c98c 2024-12-09T05:17:51,546 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region 850d12d27f942676099898eccdc0c98c Daughter A: [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c] storefiles, Daughter B: [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c, 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b] storefiles. 2024-12-09T05:17:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741850_1026 (size=71) 2024-12-09T05:17:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741850_1026 (size=71) 2024-12-09T05:17:51,557 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:51,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741851_1027 (size=71) 2024-12-09T05:17:51,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741851_1027 (size=71) 2024-12-09T05:17:51,570 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:51,581 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-12-09T05:17:51,583 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-12-09T05:17:51,586 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733721471585"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733721471585"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733721471585"}]},"ts":"1733721471585"} 2024-12-09T05:17:51,586 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733721471585"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721471585"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733721471585"}]},"ts":"1733721471585"} 2024-12-09T05:17:51,586 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733721471585"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721471585"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733721471585"}]},"ts":"1733721471585"} 2024-12-09T05:17:51,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39913 {}] regionserver.HRegion(8581): Flush requested on 1588230740 2024-12-09T05:17:51,617 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-09T05:17:51,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB 2024-12-09T05:17:51,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=35e9bfca82c3f49a16c93d34a671827f, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7fac5053b0b382751073fe7134f7b9c, ASSIGN}] 2024-12-09T05:17:51,624 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7fac5053b0b382751073fe7134f7b9c, ASSIGN 2024-12-09T05:17:51,624 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=35e9bfca82c3f49a16c93d34a671827f, ASSIGN 2024-12-09T05:17:51,625 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=35e9bfca82c3f49a16c93d34a671827f, ASSIGN; state=SPLITTING_NEW, location=41a709354867,39913,1733721445744; forceNewPlan=false, retain=false 2024-12-09T05:17:51,625 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7fac5053b0b382751073fe7134f7b9c, ASSIGN; state=SPLITTING_NEW, location=41a709354867,39913,1733721445744; forceNewPlan=false, retain=false 2024-12-09T05:17:51,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/info/e22d1cb89bf24325835231e7d7415ca5 is 193, key is TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c./info:regioninfo/1733721471585/Put/seqid=0 2024-12-09T05:17:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741852_1028 (size=9423) 2024-12-09T05:17:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741852_1028 (size=9423) 2024-12-09T05:17:51,642 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/info/e22d1cb89bf24325835231e7d7415ca5 2024-12-09T05:17:51,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/table/40e8d91aa6d446c494477d5608242d97 is 65, key is TestLogRolling-testLogRolling/table:state/1733721447434/Put/seqid=0 2024-12-09T05:17:51,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741853_1029 (size=5412) 2024-12-09T05:17:51,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741853_1029 (size=5412) 2024-12-09T05:17:51,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/table/40e8d91aa6d446c494477d5608242d97 2024-12-09T05:17:51,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/info/e22d1cb89bf24325835231e7d7415ca5 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/info/e22d1cb89bf24325835231e7d7415ca5 2024-12-09T05:17:51,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/info/e22d1cb89bf24325835231e7d7415ca5, entries=29, sequenceid=17, filesize=9.2 K 2024-12-09T05:17:51,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/table/40e8d91aa6d446c494477d5608242d97 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/table/40e8d91aa6d446c494477d5608242d97 2024-12-09T05:17:51,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/table/40e8d91aa6d446c494477d5608242d97, entries=4, sequenceid=17, filesize=5.3 K 2024-12-09T05:17:51,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 73ms, sequenceid=17, compaction requested=false 2024-12-09T05:17:51,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-09T05:17:51,775 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=b7fac5053b0b382751073fe7134f7b9c, regionState=OPENING, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:51,775 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=35e9bfca82c3f49a16c93d34a671827f, regionState=OPENING, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:51,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=17, ppid=15, state=RUNNABLE; OpenRegionProcedure 35e9bfca82c3f49a16c93d34a671827f, server=41a709354867,39913,1733721445744}] 2024-12-09T05:17:51,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=16, state=RUNNABLE; OpenRegionProcedure b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744}] 2024-12-09T05:17:51,929 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,39913,1733721445744 2024-12-09T05:17:51,933 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:17:51,933 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => 35e9bfca82c3f49a16c93d34a671827f, NAME => 'TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-09T05:17:51,933 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,933 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:51,934 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for 35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,934 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for 35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,935 INFO [StoreOpener-35e9bfca82c3f49a16c93d34a671827f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,936 INFO [StoreOpener-35e9bfca82c3f49a16c93d34a671827f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 35e9bfca82c3f49a16c93d34a671827f columnFamilyName info 2024-12-09T05:17:51,936 DEBUG [StoreOpener-35e9bfca82c3f49a16c93d34a671827f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:51,947 
DEBUG [StoreOpener-35e9bfca82c3f49a16c93d34a671827f-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c->hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14-bottom 2024-12-09T05:17:51,948 INFO [StoreOpener-35e9bfca82c3f49a16c93d34a671827f-1 {}] regionserver.HStore(327): Store=35e9bfca82c3f49a16c93d34a671827f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:51,948 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,949 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,951 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for 35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:17:51,952 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened 35e9bfca82c3f49a16c93d34a671827f; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824251, jitterRate=0.04808935523033142}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:17:51,953 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for 35e9bfca82c3f49a16c93d34a671827f: 2024-12-09T05:17:51,953 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f., pid=17, masterSystemTime=1733721471929 2024-12-09T05:17:51,954 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store 35e9bfca82c3f49a16c93d34a671827f:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:17:51,954 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:51,954 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-09T05:17:51,954 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since 
it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:17:51,954 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): 35e9bfca82c3f49a16c93d34a671827f/info is initiating minor compaction (all files) 2024-12-09T05:17:51,954 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35e9bfca82c3f49a16c93d34a671827f/info in TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:17:51,954 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c->hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14-bottom] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/.tmp, totalSize=75.7 K 2024-12-09T05:17:51,955 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733721457083 2024-12-09T05:17:51,955 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:17:51,955 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:17:51,955 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 
2024-12-09T05:17:51,955 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => b7fac5053b0b382751073fe7134f7b9c, NAME => 'TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-09T05:17:51,956 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,956 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=35e9bfca82c3f49a16c93d34a671827f, regionState=OPEN, openSeqNum=124, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:51,956 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:17:51,956 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,956 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,957 INFO [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,958 INFO [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7fac5053b0b382751073fe7134f7b9c columnFamilyName info 2024-12-09T05:17:51,958 DEBUG [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:17:51,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=15 2024-12-09T05:17:51,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=15, state=SUCCESS; OpenRegionProcedure 35e9bfca82c3f49a16c93d34a671827f, server=41a709354867,39913,1733721445744 in 180 msec 2024-12-09T05:17:51,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure 
table=TestLogRolling-testLogRolling, region=35e9bfca82c3f49a16c93d34a671827f, ASSIGN in 336 msec 2024-12-09T05:17:51,967 DEBUG [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c->hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14-top 2024-12-09T05:17:51,971 DEBUG [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2 2024-12-09T05:17:51,975 DEBUG [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b 2024-12-09T05:17:51,976 INFO [StoreOpener-b7fac5053b0b382751073fe7134f7b9c-1 {}] regionserver.HStore(327): Store=b7fac5053b0b382751073fe7134f7b9c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:17:51,976 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35e9bfca82c3f49a16c93d34a671827f#info#compaction#50 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:17:51,976 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,977 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/.tmp/info/de5ec606580e4dd490f23b04aff6b911 is 1080, key is row0001/info:/1733721457083/Put/seqid=0 2024-12-09T05:17:51,978 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,980 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:17:51,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741854_1030 (size=70862) 2024-12-09T05:17:51,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741854_1030 (size=70862) 2024-12-09T05:17:51,981 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened b7fac5053b0b382751073fe7134f7b9c; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705354, jitterRate=-0.10309690237045288}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:17:51,981 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:17:51,982 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., pid=18, masterSystemTime=1733721471929 2024-12-09T05:17:51,982 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store b7fac5053b0b382751073fe7134f7b9c:info, priority=-2147483648, current under compaction store size is 2 2024-12-09T05:17:51,982 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:51,982 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:17:51,983 INFO [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs 
to recently split daughter region TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:17:51,983 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HStore(1540): b7fac5053b0b382751073fe7134f7b9c/info is initiating minor compaction (all files) 2024-12-09T05:17:51,983 INFO [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7fac5053b0b382751073fe7134f7b9c/info in TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:17:51,984 INFO [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c->hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14-top, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp, totalSize=115.9 K 2024-12-09T05:17:51,984 DEBUG [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:17:51,984 INFO [RS_OPEN_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 
2024-12-09T05:17:51,985 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=b7fac5053b0b382751073fe7134f7b9c, regionState=OPEN, openSeqNum=124, regionLocation=41a709354867,39913,1733721445744 2024-12-09T05:17:51,986 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] compactions.Compactor(224): Compacting 7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733721457083 2024-12-09T05:17:51,986 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=109, earliestPutTs=1733721471203 2024-12-09T05:17:51,987 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733721471228 2024-12-09T05:17:51,988 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/.tmp/info/de5ec606580e4dd490f23b04aff6b911 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/de5ec606580e4dd490f23b04aff6b911 2024-12-09T05:17:51,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=16 2024-12-09T05:17:51,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=16, state=SUCCESS; OpenRegionProcedure b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 in 209 msec 2024-12-09T05:17:51,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=12 2024-12-09T05:17:51,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b7fac5053b0b382751073fe7134f7b9c, ASSIGN in 366 msec 2024-12-09T05:17:51,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=850d12d27f942676099898eccdc0c98c, daughterA=35e9bfca82c3f49a16c93d34a671827f, daughterB=b7fac5053b0b382751073fe7134f7b9c in 720 msec 2024-12-09T05:17:51,995 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in 35e9bfca82c3f49a16c93d34a671827f/info of 35e9bfca82c3f49a16c93d34a671827f into de5ec606580e4dd490f23b04aff6b911(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T05:17:51,995 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35e9bfca82c3f49a16c93d34a671827f: 2024-12-09T05:17:51,995 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f., storeName=35e9bfca82c3f49a16c93d34a671827f/info, priority=15, startTime=1733721471954; duration=0sec 2024-12-09T05:17:51,995 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:51,995 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35e9bfca82c3f49a16c93d34a671827f:info 2024-12-09T05:17:52,017 INFO [RS:0;41a709354867:39913-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7fac5053b0b382751073fe7134f7b9c#info#compaction#51 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:17:52,018 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/0bd35a841a9a42cbb1647c8be0741b5a is 1080, key is row0062/info:/1733721469194/Put/seqid=0 2024-12-09T05:17:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741855_1031 (size=42984) 2024-12-09T05:17:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741855_1031 (size=42984) 2024-12-09T05:17:52,029 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/0bd35a841a9a42cbb1647c8be0741b5a as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/0bd35a841a9a42cbb1647c8be0741b5a 2024-12-09T05:17:52,035 INFO [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7fac5053b0b382751073fe7134f7b9c/info of b7fac5053b0b382751073fe7134f7b9c into 0bd35a841a9a42cbb1647c8be0741b5a(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T05:17:52,035 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:17:52,035 INFO [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., storeName=b7fac5053b0b382751073fe7134f7b9c/info, priority=13, startTime=1733721471982; duration=0sec 2024-12-09T05:17:52,035 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:17:52,035 DEBUG [RS:0;41a709354867:39913-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7fac5053b0b382751073fe7134f7b9c:info 2024-12-09T05:17:52,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:52,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,736 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T05:17:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:52,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T05:17:53,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50972 deadline: 1733721483237, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733721447073.850d12d27f942676099898eccdc0c98c. is not online on 41a709354867,39913,1733721445744 2024-12-09T05:17:54,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:55,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:55,674 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
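The warning above repeats roughly once per second because the Close-WAL-Writer-0 thread keeps retrying RecoverLeaseFSUtils.isFileClosed() on the old meta WAL while the underlying DFSClient has already been shut down, so every reflective call unwraps to java.io.IOException: Filesystem closed. A minimal sketch of that failure mode follows; it is not code from the test, the class name is invented, and the path is a placeholder (the URI matches the hdfs://localhost:45367 address in the log, but any cached HDFS FileSystem behaves the same way):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the "Filesystem closed" cause chain seen in the stack traces above.
public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45367"), conf);
    fs.close(); // shutting the mini-cluster down closes the shared, cached DFSClient
    // Any later call on the same instance fails DFSClient.checkOpen() first and
    // throws java.io.IOException: Filesystem closed, exactly as logged above.
    fs.exists(new Path("/user/jenkins/test-data/some-wal"));
  }
}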
2024-12-09T05:17:56,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:57,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:58,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:59,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:17:59,977 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:17:59,979 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:18:00,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:01,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:02,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:03,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:03,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:03,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:18:03,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/d515a3b4ce404d9fa2f02ddc1de7a0e2 is 1080, key is row0097/info:/1733721483349/Put/seqid=0 2024-12-09T05:18:03,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741856_1032 (size=12515) 2024-12-09T05:18:03,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741856_1032 (size=12515) 2024-12-09T05:18:03,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/d515a3b4ce404d9fa2f02ddc1de7a0e2 2024-12-09T05:18:03,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/d515a3b4ce404d9fa2f02ddc1de7a0e2 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/d515a3b4ce404d9fa2f02ddc1de7a0e2 2024-12-09T05:18:03,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/d515a3b4ce404d9fa2f02ddc1de7a0e2, entries=7, sequenceid=134, filesize=12.2 K 2024-12-09T05:18:03,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for b7fac5053b0b382751073fe7134f7b9c in 27ms, sequenceid=134, compaction requested=false 2024-12-09T05:18:03,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:03,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:03,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-09T05:18:03,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/3ba9588a4a3e43e8a1ef227317224a02 is 1080, key is row0104/info:/1733721483357/Put/seqid=0 2024-12-09T05:18:03,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741857_1033 (size=27628) 2024-12-09T05:18:03,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741857_1033 (size=27628) 2024-12-09T05:18:03,400 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/3ba9588a4a3e43e8a1ef227317224a02 2024-12-09T05:18:03,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/3ba9588a4a3e43e8a1ef227317224a02 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/3ba9588a4a3e43e8a1ef227317224a02 2024-12-09T05:18:03,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/3ba9588a4a3e43e8a1ef227317224a02, entries=21, sequenceid=158, filesize=27.0 K 2024-12-09T05:18:03,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for b7fac5053b0b382751073fe7134f7b9c in 29ms, sequenceid=158, compaction requested=true 2024-12-09T05:18:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7fac5053b0b382751073fe7134f7b9c:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:18:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:03,414 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:18:03,415 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 83127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:18:03,415 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): b7fac5053b0b382751073fe7134f7b9c/info is initiating minor compaction (all files) 2024-12-09T05:18:03,415 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7fac5053b0b382751073fe7134f7b9c/info in 
TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:03,415 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/0bd35a841a9a42cbb1647c8be0741b5a, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/d515a3b4ce404d9fa2f02ddc1de7a0e2, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/3ba9588a4a3e43e8a1ef227317224a02] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp, totalSize=81.2 K 2024-12-09T05:18:03,416 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bd35a841a9a42cbb1647c8be0741b5a, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733721469194 2024-12-09T05:18:03,416 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting d515a3b4ce404d9fa2f02ddc1de7a0e2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733721483349 2024-12-09T05:18:03,417 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ba9588a4a3e43e8a1ef227317224a02, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733721483357 2024-12-09T05:18:03,430 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7fac5053b0b382751073fe7134f7b9c#info#compaction#54 average throughput is 32.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:18:03,430 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/7d6682c1283d484394a82ca258ee7a85 is 1080, key is row0062/info:/1733721469194/Put/seqid=0 2024-12-09T05:18:03,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741858_1034 (size=73410) 2024-12-09T05:18:03,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741858_1034 (size=73410) 2024-12-09T05:18:03,443 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/7d6682c1283d484394a82ca258ee7a85 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7d6682c1283d484394a82ca258ee7a85 2024-12-09T05:18:03,449 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7fac5053b0b382751073fe7134f7b9c/info of b7fac5053b0b382751073fe7134f7b9c into 7d6682c1283d484394a82ca258ee7a85(size=71.7 K), total size for store is 71.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T05:18:03,449 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:03,450 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., storeName=b7fac5053b0b382751073fe7134f7b9c/info, priority=13, startTime=1733721483414; duration=0sec 2024-12-09T05:18:03,450 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:03,450 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7fac5053b0b382751073fe7134f7b9c:info 2024-12-09T05:18:04,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:05,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:05,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:18:05,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/77bbb72f932d4b319658ce472a6e9deb is 1080, key is row0125/info:/1733721483385/Put/seqid=0 2024-12-09T05:18:05,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741859_1035 (size=12516) 2024-12-09T05:18:05,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741859_1035 (size=12516) 2024-12-09T05:18:05,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/77bbb72f932d4b319658ce472a6e9deb 2024-12-09T05:18:05,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/77bbb72f932d4b319658ce472a6e9deb as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/77bbb72f932d4b319658ce472a6e9deb 2024-12-09T05:18:05,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/77bbb72f932d4b319658ce472a6e9deb, entries=7, sequenceid=169, filesize=12.2 K 2024-12-09T05:18:05,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b7fac5053b0b382751073fe7134f7b9c in 27ms, sequenceid=169, compaction requested=false 2024-12-09T05:18:05,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:05,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:05,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T05:18:05,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/a5b7230285c14c6c8113834610486f93 is 1080, key is row0132/info:/1733721485394/Put/seqid=0 2024-12-09T05:18:05,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to 
blk_1073741860_1036 (size=29784) 2024-12-09T05:18:05,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741860_1036 (size=29784) 2024-12-09T05:18:05,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/a5b7230285c14c6c8113834610486f93 2024-12-09T05:18:05,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/a5b7230285c14c6c8113834610486f93 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a5b7230285c14c6c8113834610486f93 2024-12-09T05:18:05,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a5b7230285c14c6c8113834610486f93, entries=23, sequenceid=195, filesize=29.1 K 2024-12-09T05:18:05,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=6.30 KB/6456 for b7fac5053b0b382751073fe7134f7b9c in 21ms, sequenceid=195, compaction requested=true 2024-12-09T05:18:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7fac5053b0b382751073fe7134f7b9c:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:18:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:05,443 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:18:05,444 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 115710 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:18:05,444 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): b7fac5053b0b382751073fe7134f7b9c/info is initiating minor compaction (all files) 2024-12-09T05:18:05,444 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7fac5053b0b382751073fe7134f7b9c/info in TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 
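Each flush above adds one HFile to b7fac5053b0b382751073fe7134f7b9c/info; once three files accumulate, ExploringCompactionPolicy selects all of them for a minor compaction (the "3 eligible, 16 blocking" figures in the selection message). A hedged sketch of the configuration keys behind that cycle is shown below; the values are the commonly documented defaults, not the much smaller limits this test configures, and the class name is invented:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch of the knobs that drive the flush/compaction cycle logged above.
public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // per-region memstore flush trigger
    conf.setInt("hbase.hstore.compactionThreshold", 3);  // store files needed before a minor compaction is requested
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" limit in the selection message
    System.out.println("flush.size=" + conf.getLong("hbase.hregion.memstore.flush.size", -1));
  }
}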
2024-12-09T05:18:05,444 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7d6682c1283d484394a82ca258ee7a85, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/77bbb72f932d4b319658ce472a6e9deb, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a5b7230285c14c6c8113834610486f93] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp, totalSize=113.0 K 2024-12-09T05:18:05,444 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d6682c1283d484394a82ca258ee7a85, keycount=63, bloomtype=ROW, size=71.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733721469194 2024-12-09T05:18:05,445 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77bbb72f932d4b319658ce472a6e9deb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733721483385 2024-12-09T05:18:05,445 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5b7230285c14c6c8113834610486f93, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733721485394 2024-12-09T05:18:05,456 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7fac5053b0b382751073fe7134f7b9c#info#compaction#57 average throughput is 47.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:18:05,456 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/885c7d596c644d7f89e6f92b94db52d6 is 1080, key is row0062/info:/1733721469194/Put/seqid=0 2024-12-09T05:18:05,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741861_1037 (size=105860) 2024-12-09T05:18:05,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741861_1037 (size=105860) 2024-12-09T05:18:05,465 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/885c7d596c644d7f89e6f92b94db52d6 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/885c7d596c644d7f89e6f92b94db52d6 2024-12-09T05:18:05,470 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7fac5053b0b382751073fe7134f7b9c/info of b7fac5053b0b382751073fe7134f7b9c into 885c7d596c644d7f89e6f92b94db52d6(size=103.4 K), total size for store is 103.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T05:18:05,470 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:05,471 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., storeName=b7fac5053b0b382751073fe7134f7b9c/info, priority=13, startTime=1733721485443; duration=0sec 2024-12-09T05:18:05,471 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:05,471 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7fac5053b0b382751073fe7134f7b9c:info 2024-12-09T05:18:06,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:06,985 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T05:18:06,985 INFO [master/41a709354867:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T05:18:07,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:07,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:07,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:18:07,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/07df6998aff3480f8104f6ffe6202823 is 1080, key is row0155/info:/1733721485423/Put/seqid=0 2024-12-09T05:18:07,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741862_1038 (size=12516) 2024-12-09T05:18:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741862_1038 (size=12516) 2024-12-09T05:18:07,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/07df6998aff3480f8104f6ffe6202823 2024-12-09T05:18:07,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/07df6998aff3480f8104f6ffe6202823 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/07df6998aff3480f8104f6ffe6202823 2024-12-09T05:18:07,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T05:18:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50972 deadline: 1733721497454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 2024-12-09T05:18:07,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/07df6998aff3480f8104f6ffe6202823, entries=7, sequenceid=206, filesize=12.2 K 2024-12-09T05:18:07,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b7fac5053b0b382751073fe7134f7b9c in 26ms, sequenceid=206, compaction requested=false 2024-12-09T05:18:07,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:08,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:09,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:10,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:11,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:11,490 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835 2024-12-09T05:18:12,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:13,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:14,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:15,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:16,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:17,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:17,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:17,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T05:18:17,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/6c267183a4ef4b8495be0302370119c4 is 1080, key is row0162/info:/1733721487431/Put/seqid=0 2024-12-09T05:18:17,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T05:18:17,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50972 deadline: 1733721507473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 2024-12-09T05:18:17,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741863_1039 (size=29784) 2024-12-09T05:18:17,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741863_1039 (size=29784) 2024-12-09T05:18:17,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/6c267183a4ef4b8495be0302370119c4 2024-12-09T05:18:17,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/6c267183a4ef4b8495be0302370119c4 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/6c267183a4ef4b8495be0302370119c4 2024-12-09T05:18:17,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/6c267183a4ef4b8495be0302370119c4, entries=23, sequenceid=232, filesize=29.1 K 2024-12-09T05:18:17,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=6.30 KB/6456 for b7fac5053b0b382751073fe7134f7b9c in 28ms, sequenceid=232, compaction requested=true 2024-12-09T05:18:17,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:17,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7fac5053b0b382751073fe7134f7b9c:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:18:17,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:17,494 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:18:17,495 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 148160 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:18:17,495 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): b7fac5053b0b382751073fe7134f7b9c/info is initiating minor compaction (all files) 2024-12-09T05:18:17,495 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7fac5053b0b382751073fe7134f7b9c/info in TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 
2024-12-09T05:18:17,495 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/885c7d596c644d7f89e6f92b94db52d6, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/07df6998aff3480f8104f6ffe6202823, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/6c267183a4ef4b8495be0302370119c4] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp, totalSize=144.7 K 2024-12-09T05:18:17,496 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 885c7d596c644d7f89e6f92b94db52d6, keycount=93, bloomtype=ROW, size=103.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733721469194 2024-12-09T05:18:17,496 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07df6998aff3480f8104f6ffe6202823, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733721485423 2024-12-09T05:18:17,496 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c267183a4ef4b8495be0302370119c4, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733721487431 2024-12-09T05:18:17,509 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7fac5053b0b382751073fe7134f7b9c#info#compaction#60 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:18:17,509 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/a2a59a41e6834554a2c5079e5588ba50 is 1080, key is row0062/info:/1733721469194/Put/seqid=0 2024-12-09T05:18:17,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741864_1040 (size=138507) 2024-12-09T05:18:17,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741864_1040 (size=138507) 2024-12-09T05:18:17,521 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/a2a59a41e6834554a2c5079e5588ba50 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a2a59a41e6834554a2c5079e5588ba50 2024-12-09T05:18:17,527 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7fac5053b0b382751073fe7134f7b9c/info of b7fac5053b0b382751073fe7134f7b9c into a2a59a41e6834554a2c5079e5588ba50(size=135.3 K), total size for store is 135.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T05:18:17,527 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:17,527 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., storeName=b7fac5053b0b382751073fe7134f7b9c/info, priority=13, startTime=1733721497494; duration=0sec 2024-12-09T05:18:17,528 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:17,528 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7fac5053b0b382751073fe7134f7b9c:info 2024-12-09T05:18:18,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:19,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:20,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:21,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:22,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:23,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:23,995 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 688aa77f4712fd33e61f733d63bfbd0a, had cached 0 bytes from a total of 23930 2024-12-09T05:18:24,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:25,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:25,674 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T05:18:26,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:27,118 DEBUG [master/41a709354867:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T05:18:27,118 DEBUG [master/41a709354867:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4c92af42adee675c0deec3b68367073c changed from -1.0 to 0.0, refreshing cache 2024-12-09T05:18:27,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:27,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:27,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:18:27,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/94a500530e6242d1a182987299fac6bd is 1080, key is row0185/info:/1733721497467/Put/seqid=0 2024-12-09T05:18:27,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741865_1041 (size=12516) 2024-12-09T05:18:27,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741865_1041 (size=12516) 2024-12-09T05:18:27,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/94a500530e6242d1a182987299fac6bd 2024-12-09T05:18:27,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/94a500530e6242d1a182987299fac6bd as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/94a500530e6242d1a182987299fac6bd 2024-12-09T05:18:27,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/94a500530e6242d1a182987299fac6bd, entries=7, sequenceid=243, filesize=12.2 K 2024-12-09T05:18:27,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for b7fac5053b0b382751073fe7134f7b9c in 22ms, sequenceid=243, compaction requested=false 2024-12-09T05:18:27,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:28,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:29,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:29,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:18:29,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/772ff69c37d44cd59f3256a547890efe is 1080, key is row0192/info:/1733721507526/Put/seqid=0 2024-12-09T05:18:29,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741866_1042 (size=12516) 2024-12-09T05:18:29,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741866_1042 (size=12516) 2024-12-09T05:18:29,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/772ff69c37d44cd59f3256a547890efe 2024-12-09T05:18:29,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/772ff69c37d44cd59f3256a547890efe as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/772ff69c37d44cd59f3256a547890efe 2024-12-09T05:18:29,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T05:18:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50972 deadline: 1733721519558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b7fac5053b0b382751073fe7134f7b9c, server=41a709354867,39913,1733721445744 2024-12-09T05:18:29,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/772ff69c37d44cd59f3256a547890efe, entries=7, sequenceid=253, filesize=12.2 K 2024-12-09T05:18:29,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b7fac5053b0b382751073fe7134f7b9c in 28ms, sequenceid=253, compaction requested=true 2024-12-09T05:18:29,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:29,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7fac5053b0b382751073fe7134f7b9c:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:18:29,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:29,562 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:18:29,563 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163539 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:18:29,563 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): b7fac5053b0b382751073fe7134f7b9c/info is initiating minor compaction (all files) 2024-12-09T05:18:29,563 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7fac5053b0b382751073fe7134f7b9c/info in TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 
2024-12-09T05:18:29,563 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a2a59a41e6834554a2c5079e5588ba50, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/94a500530e6242d1a182987299fac6bd, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/772ff69c37d44cd59f3256a547890efe] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp, totalSize=159.7 K 2024-12-09T05:18:29,563 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2a59a41e6834554a2c5079e5588ba50, keycount=123, bloomtype=ROW, size=135.3 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733721469194 2024-12-09T05:18:29,563 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94a500530e6242d1a182987299fac6bd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733721497467 2024-12-09T05:18:29,564 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 772ff69c37d44cd59f3256a547890efe, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733721507526 2024-12-09T05:18:29,576 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7fac5053b0b382751073fe7134f7b9c#info#compaction#63 average throughput is 46.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:18:29,577 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/2d7ca3873bbf4a59adfee253db41911f is 1080, key is row0062/info:/1733721469194/Put/seqid=0 2024-12-09T05:18:29,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741867_1043 (size=153701) 2024-12-09T05:18:29,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741867_1043 (size=153701) 2024-12-09T05:18:29,586 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/2d7ca3873bbf4a59adfee253db41911f as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/2d7ca3873bbf4a59adfee253db41911f 2024-12-09T05:18:29,591 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7fac5053b0b382751073fe7134f7b9c/info of b7fac5053b0b382751073fe7134f7b9c into 2d7ca3873bbf4a59adfee253db41911f(size=150.1 K), total size for store is 150.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T05:18:29,591 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:29,591 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., storeName=b7fac5053b0b382751073fe7134f7b9c/info, priority=13, startTime=1733721509562; duration=0sec 2024-12-09T05:18:29,591 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:29,591 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7fac5053b0b382751073fe7134f7b9c:info 2024-12-09T05:18:30,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:31,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:32,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:33,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:34,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:35,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:36,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:36,934 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 35e9bfca82c3f49a16c93d34a671827f, had cached 0 bytes from a total of 70862 2024-12-09T05:18:36,956 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region b7fac5053b0b382751073fe7134f7b9c, had cached 0 bytes from a total of 153701 2024-12-09T05:18:37,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:38,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:39,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:39,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:39,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T05:18:39,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/b3cef86c8fc649b2a4bb107593a4271e is 1080, key is row0199/info:/1733721509534/Put/seqid=0 2024-12-09T05:18:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741868_1044 (size=29806) 2024-12-09T05:18:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741868_1044 (size=29806) 2024-12-09T05:18:39,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/b3cef86c8fc649b2a4bb107593a4271e 2024-12-09T05:18:39,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/b3cef86c8fc649b2a4bb107593a4271e as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b3cef86c8fc649b2a4bb107593a4271e 2024-12-09T05:18:39,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b3cef86c8fc649b2a4bb107593a4271e, entries=23, sequenceid=280, filesize=29.1 K 2024-12-09T05:18:39,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for b7fac5053b0b382751073fe7134f7b9c in 24ms, sequenceid=280, compaction requested=false 2024-12-09T05:18:39,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:40,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:41,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:41,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta after 196121ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor238.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T05:18:41,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:41,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T05:18:41,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/7ae60b23e3734c0480bafefb67d73ce4 is 1080, key is row0222/info:/1733721519564/Put/seqid=0 2024-12-09T05:18:41,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741869_1045 (size=12523) 2024-12-09T05:18:41,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741869_1045 (size=12523) 2024-12-09T05:18:41,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/7ae60b23e3734c0480bafefb67d73ce4 2024-12-09T05:18:41,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/7ae60b23e3734c0480bafefb67d73ce4 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7ae60b23e3734c0480bafefb67d73ce4 2024-12-09T05:18:41,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7ae60b23e3734c0480bafefb67d73ce4, entries=7, sequenceid=290, filesize=12.2 K 2024-12-09T05:18:41,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for b7fac5053b0b382751073fe7134f7b9c in 22ms, sequenceid=290, compaction requested=true 2024-12-09T05:18:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7fac5053b0b382751073fe7134f7b9c:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T05:18:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:41,593 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T05:18:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39913 {}] regionserver.HRegion(8581): Flush requested on b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:41,594 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T05:18:41,595 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 196030 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T05:18:41,595 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1540): b7fac5053b0b382751073fe7134f7b9c/info is initiating minor compaction (all files) 2024-12-09T05:18:41,595 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7fac5053b0b382751073fe7134f7b9c/info in TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:41,595 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/2d7ca3873bbf4a59adfee253db41911f, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b3cef86c8fc649b2a4bb107593a4271e, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7ae60b23e3734c0480bafefb67d73ce4] into tmpdir=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp, totalSize=191.4 K 2024-12-09T05:18:41,595 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d7ca3873bbf4a59adfee253db41911f, keycount=137, bloomtype=ROW, size=150.1 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733721469194 2024-12-09T05:18:41,596 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3cef86c8fc649b2a4bb107593a4271e, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733721509534 2024-12-09T05:18:41,597 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ae60b23e3734c0480bafefb67d73ce4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733721519564 2024-12-09T05:18:41,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/b21c780f98e74570b10f32c675710cae is 1080, key is row0229/info:/1733721521571/Put/seqid=0 2024-12-09T05:18:41,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741870_1046 (size=29807) 2024-12-09T05:18:41,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741870_1046 (size=29807) 2024-12-09T05:18:41,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=316 (bloomFilter=true), 
to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/b21c780f98e74570b10f32c675710cae 2024-12-09T05:18:41,611 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7fac5053b0b382751073fe7134f7b9c#info#compaction#67 average throughput is 57.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T05:18:41,612 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/df2f7a0c2c9942b0802f0c4db1aac845 is 1080, key is row0062/info:/1733721469194/Put/seqid=0 2024-12-09T05:18:41,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/b21c780f98e74570b10f32c675710cae as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b21c780f98e74570b10f32c675710cae 2024-12-09T05:18:41,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741871_1047 (size=186180) 2024-12-09T05:18:41,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741871_1047 (size=186180) 2024-12-09T05:18:41,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b21c780f98e74570b10f32c675710cae, entries=23, sequenceid=316, filesize=29.1 K 2024-12-09T05:18:41,623 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/df2f7a0c2c9942b0802f0c4db1aac845 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/df2f7a0c2c9942b0802f0c4db1aac845 2024-12-09T05:18:41,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for b7fac5053b0b382751073fe7134f7b9c in 29ms, sequenceid=316, compaction requested=false 2024-12-09T05:18:41,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:41,628 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7fac5053b0b382751073fe7134f7b9c/info of b7fac5053b0b382751073fe7134f7b9c into df2f7a0c2c9942b0802f0c4db1aac845(size=181.8 K), total size for store is 210.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
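The entries above show one full flush-then-compact cycle for b7fac5053b0b382751073fe7134f7b9c/info: a flush adds a third store file, the exploring policy selects all three eligible files (196,030 bytes in total), and they are rewritten into a single 181.8 K file. The sketch below is a deliberately simplified stand-in for that kind of ratio-based candidate selection, not HBase's actual ExploringCompactionPolicy; the sizes in main() only approximate the three files named in the log.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified, illustrative stand-in for ratio-based compaction selection.
// NOT HBase's ExploringCompactionPolicy; sizes are bytes and only approximate
// the three files the log shows being compacted.
public class CompactionSelectionSketch {

    // A window passes the ratio check if no file is larger than
    // ratio * (combined size of the other files in the window).
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = 0;
        for (long size : window) {
            total += size;
        }
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    // Examine every contiguous window of at least minFiles files and keep the
    // widest one that passes the ratio check; if none passes, fall back to all
    // files, roughly what the "minor compaction (all files)" above amounts to.
    static List<Long> select(List<Long> fileSizes, int minFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (withinRatio(window, ratio) && window.size() > best.size()) {
                    best = new ArrayList<>(window);
                }
            }
        }
        return best.isEmpty() ? new ArrayList<>(fileSizes) : best;
    }

    public static void main(String[] args) {
        // Roughly the three compacted files: ~150.1 K, ~29.1 K and ~12.2 K.
        List<Long> sizes = List.of(153_700L, 29_807L, 12_523L);
        List<Long> picked = select(sizes, 2, 1.2);
        long totalBytes = picked.stream().mapToLong(Long::longValue).sum();
        System.out.println("compacting " + picked.size() + " files, " + totalBytes + " bytes");
    }
}
```

With these illustrative sizes no smaller window satisfies the ratio check, so the sketch falls back to compacting all three files, mirroring the three-file selection logged above.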
2024-12-09T05:18:41,628 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:41,628 INFO [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., storeName=b7fac5053b0b382751073fe7134f7b9c/info, priority=13, startTime=1733721521593; duration=0sec 2024-12-09T05:18:41,628 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T05:18:41,628 DEBUG [RS:0;41a709354867:39913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7fac5053b0b382751073fe7134f7b9c:info 2024-12-09T05:18:42,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:43,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:43,601 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-09T05:18:43,601 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C39913%2C1733721445744.1733721523601 2024-12-09T05:18:43,609 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721446125 with entries=308, filesize=306.54 KB; new WAL /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721523601 2024-12-09T05:18:43,609 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36645:36645),(127.0.0.1/127.0.0.1:33801:33801)] 2024-12-09T05:18:43,609 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721446125 is not closed yet, will try archiving it next time 2024-12-09T05:18:43,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741833_1009 (size=313906) 2024-12-09T05:18:43,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741833_1009 (size=313906) 2024-12-09T05:18:43,613 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing b7fac5053b0b382751073fe7134f7b9c 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-09T05:18:43,616 DEBUG [Time-limited test {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/8d2a31e44dbc41d48dfa50f7a6b06e41 is 1080, key is row0252/info:/1733721521595/Put/seqid=0 2024-12-09T05:18:43,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741873_1049 (size=10357) 2024-12-09T05:18:43,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741873_1049 (size=10357) 2024-12-09T05:18:43,621 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/8d2a31e44dbc41d48dfa50f7a6b06e41 2024-12-09T05:18:43,626 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/.tmp/info/8d2a31e44dbc41d48dfa50f7a6b06e41 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/8d2a31e44dbc41d48dfa50f7a6b06e41 2024-12-09T05:18:43,631 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/8d2a31e44dbc41d48dfa50f7a6b06e41, entries=5, sequenceid=325, filesize=10.1 K 2024-12-09T05:18:43,632 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for b7fac5053b0b382751073fe7134f7b9c in 19ms, sequenceid=325, compaction requested=true 2024-12-09T05:18:43,632 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:43,632 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 4c92af42adee675c0deec3b68367073c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T05:18:43,650 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/.tmp/info/68f21ef2c2634a88aa60959be5bb7ef6 is 45, key is default/info:d/1733721446936/Put/seqid=0 2024-12-09T05:18:43,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741874_1050 (size=5037) 2024-12-09T05:18:43,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741874_1050 (size=5037) 2024-12-09T05:18:43,655 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/.tmp/info/68f21ef2c2634a88aa60959be5bb7ef6 2024-12-09T05:18:43,661 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/.tmp/info/68f21ef2c2634a88aa60959be5bb7ef6 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/info/68f21ef2c2634a88aa60959be5bb7ef6 2024-12-09T05:18:43,666 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/info/68f21ef2c2634a88aa60959be5bb7ef6, entries=2, sequenceid=6, filesize=4.9 K 2024-12-09T05:18:43,667 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4c92af42adee675c0deec3b68367073c in 35ms, sequenceid=6, compaction requested=false 2024-12-09T05:18:43,667 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 4c92af42adee675c0deec3b68367073c: 2024-12-09T05:18:43,667 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 35e9bfca82c3f49a16c93d34a671827f: 2024-12-09T05:18:43,667 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB 2024-12-09T05:18:43,671 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/info/e5612dbf1a444378991d551f00c7a244 is 193, key is TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c./info:regioninfo/1733721471985/Put/seqid=0 2024-12-09T05:18:43,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741875_1051 (size=7803) 2024-12-09T05:18:43,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741875_1051 (size=7803) 2024-12-09T05:18:43,677 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/info/e5612dbf1a444378991d551f00c7a244 2024-12-09T05:18:43,682 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/.tmp/info/e5612dbf1a444378991d551f00c7a244 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/info/e5612dbf1a444378991d551f00c7a244 2024-12-09T05:18:43,687 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/info/e5612dbf1a444378991d551f00c7a244, entries=16, sequenceid=24, filesize=7.6 K 2024-12-09T05:18:43,687 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=24, compaction requested=false 2024-12-09T05:18:43,688 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-09T05:18:43,688 INFO 
[Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C39913%2C1733721445744.1733721523688 2024-12-09T05:18:43,693 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721523601 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721523688 2024-12-09T05:18:43,693 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33801:33801),(127.0.0.1/127.0.0.1:36645:36645)] 2024-12-09T05:18:43,693 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721523601 is not closed yet, will try archiving it next time 2024-12-09T05:18:43,694 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721446125 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/oldWALs/41a709354867%2C39913%2C1733721445744.1733721446125 2024-12-09T05:18:43,694 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:18:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741872_1048 (size=1255) 2024-12-09T05:18:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741872_1048 (size=1255) 2024-12-09T05:18:43,695 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744/41a709354867%2C39913%2C1733721445744.1733721523601 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/oldWALs/41a709354867%2C39913%2C1733721445744.1733721523601 2024-12-09T05:18:43,795 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T05:18:43,795 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T05:18:43,795 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0245eb78 to 127.0.0.1:59807 2024-12-09T05:18:43,795 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:43,795 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:18:43,795 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1673680637, stopped=false 2024-12-09T05:18:43,795 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=41a709354867,35729,1733721445690 2024-12-09T05:18:43,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:18:43,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:18:43,797 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T05:18:43,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:43,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:43,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:43,797 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,39913,1733721445744' ***** 2024-12-09T05:18:43,797 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:18:43,797 INFO [RS:0;41a709354867:39913 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:18:43,797 INFO [RS:0;41a709354867:39913 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:18:43,797 INFO [RS:0;41a709354867:39913 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:18:43,797 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:18:43,797 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(3579): Received CLOSE for b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:43,798 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(3579): Received CLOSE for 4c92af42adee675c0deec3b68367073c 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(3579): Received CLOSE for 35e9bfca82c3f49a16c93d34a671827f 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,39913,1733721445744 2024-12-09T05:18:43,798 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:18:43,798 DEBUG [RS:0;41a709354867:39913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:43,798 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing b7fac5053b0b382751073fe7134f7b9c, disabling compactions & flushes 2024-12-09T05:18:43,798 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
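The NodeDeleted events on /hbase/running above are how the shutdown request reaches each process: the master deletes that znode and every watcher registered on it begins stopping. Below is a minimal, standalone ZooKeeper client showing the same watch-and-react pattern; it uses the plain ZooKeeper API rather than HBase's ZKWatcher, with the quorum address and znode path taken from the log for illustration.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Plain ZooKeeper client illustrating the shutdown signal seen in the log: the master
// deletes /hbase/running and every watcher registered on that znode reacts to the
// NodeDeleted event. This is not HBase's ZKWatcher.
public class RunningZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch shutdownRequested = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                shutdownRequested.countDown();   // start stopping this process
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59807", 30_000, watcher);
        zk.exists("/hbase/running", true);       // register the (one-shot) watch
        shutdownRequested.await();               // blocks until the znode is deleted
        System.out.println("NodeDeleted on /hbase/running -> shutdown requested");
        zk.close();
    }
}
```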
2024-12-09T05:18:43,798 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:43,798 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. after waiting 0 ms 2024-12-09T05:18:43,798 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:18:43,798 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:18:43,799 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close 2024-12-09T05:18:43,799 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1603): Online Regions={b7fac5053b0b382751073fe7134f7b9c=TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c., 4c92af42adee675c0deec3b68367073c=hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c., 35e9bfca82c3f49a16c93d34a671827f=TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T05:18:43,799 DEBUG [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 35e9bfca82c3f49a16c93d34a671827f, 4c92af42adee675c0deec3b68367073c, b7fac5053b0b382751073fe7134f7b9c 2024-12-09T05:18:43,799 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:18:43,799 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:18:43,799 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:18:43,799 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:18:43,799 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:18:43,799 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c->hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14-top, 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/0bd35a841a9a42cbb1647c8be0741b5a, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/d515a3b4ce404d9fa2f02ddc1de7a0e2, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7d6682c1283d484394a82ca258ee7a85, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/3ba9588a4a3e43e8a1ef227317224a02, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/77bbb72f932d4b319658ce472a6e9deb, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/885c7d596c644d7f89e6f92b94db52d6, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a5b7230285c14c6c8113834610486f93, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/07df6998aff3480f8104f6ffe6202823, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a2a59a41e6834554a2c5079e5588ba50, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/6c267183a4ef4b8495be0302370119c4, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/94a500530e6242d1a182987299fac6bd, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/2d7ca3873bbf4a59adfee253db41911f, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/772ff69c37d44cd59f3256a547890efe, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b3cef86c8fc649b2a4bb107593a4271e, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7ae60b23e3734c0480bafefb67d73ce4] to archive 2024-12-09T05:18:43,800 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T05:18:43,802 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c 2024-12-09T05:18:43,803 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-80cfdf26267541f59f4e378c480e89b2 2024-12-09T05:18:43,804 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-09T05:18:43,805 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T05:18:43,805 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/0bd35a841a9a42cbb1647c8be0741b5a to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/0bd35a841a9a42cbb1647c8be0741b5a 2024-12-09T05:18:43,805 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:18:43,805 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:18:43,805 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T05:18:43,806 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/TestLogRolling-testLogRolling=850d12d27f942676099898eccdc0c98c-cb44807ff99e46cf9dac7142fd5f408b 2024-12-09T05:18:43,807 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/d515a3b4ce404d9fa2f02ddc1de7a0e2 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/d515a3b4ce404d9fa2f02ddc1de7a0e2 2024-12-09T05:18:43,808 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7d6682c1283d484394a82ca258ee7a85 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7d6682c1283d484394a82ca258ee7a85 2024-12-09T05:18:43,809 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/3ba9588a4a3e43e8a1ef227317224a02 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/3ba9588a4a3e43e8a1ef227317224a02 2024-12-09T05:18:43,810 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/77bbb72f932d4b319658ce472a6e9deb to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/77bbb72f932d4b319658ce472a6e9deb 2024-12-09T05:18:43,811 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/885c7d596c644d7f89e6f92b94db52d6 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/885c7d596c644d7f89e6f92b94db52d6 2024-12-09T05:18:43,813 
DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a5b7230285c14c6c8113834610486f93 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a5b7230285c14c6c8113834610486f93 2024-12-09T05:18:43,814 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/07df6998aff3480f8104f6ffe6202823 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/07df6998aff3480f8104f6ffe6202823 2024-12-09T05:18:43,815 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a2a59a41e6834554a2c5079e5588ba50 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/a2a59a41e6834554a2c5079e5588ba50 2024-12-09T05:18:43,816 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/6c267183a4ef4b8495be0302370119c4 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/6c267183a4ef4b8495be0302370119c4 2024-12-09T05:18:43,817 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/94a500530e6242d1a182987299fac6bd to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/94a500530e6242d1a182987299fac6bd 2024-12-09T05:18:43,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/2d7ca3873bbf4a59adfee253db41911f to 
hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/2d7ca3873bbf4a59adfee253db41911f 2024-12-09T05:18:43,819 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/772ff69c37d44cd59f3256a547890efe to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/772ff69c37d44cd59f3256a547890efe 2024-12-09T05:18:43,820 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b3cef86c8fc649b2a4bb107593a4271e to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/b3cef86c8fc649b2a4bb107593a4271e 2024-12-09T05:18:43,821 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7ae60b23e3734c0480bafefb67d73ce4 to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/info/7ae60b23e3734c0480bafefb67d73ce4 2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/b7fac5053b0b382751073fe7134f7b9c/recovered.edits/328.seqid, newMaxSeqId=328, maxSeqId=123 2024-12-09T05:18:43,825 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for b7fac5053b0b382751073fe7134f7b9c: 2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733721471271.b7fac5053b0b382751073fe7134f7b9c. 2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4c92af42adee675c0deec3b68367073c, disabling compactions & flushes 2024-12-09T05:18:43,825 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 
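The HFileArchiver entries above move every compacted store file from the region's data directory into a mirrored path under archive/ instead of deleting it. A hedged sketch of that move using only the plain Hadoop FileSystem API follows; the root path and file name are copied from the log purely for illustration, and HBase's HFileArchiver does considerably more bookkeeping than this.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the archiving step: on close, compacted store files are moved from the
// data/ tree into a mirrored path under archive/ rather than deleted. Plain Hadoop
// FileSystem calls only; paths come from the log and are used for illustration.
public class ArchiveStoreFileSketch {
    public static void main(String[] args) throws Exception {
        String root = "hdfs://localhost:39619/user/jenkins/test-data/"
                + "61e27ef4-8733-32b5-ead9-42d7283b996a";
        String relative = "data/default/TestLogRolling-testLogRolling/"
                + "b7fac5053b0b382751073fe7134f7b9c/info/2d7ca3873bbf4a59adfee253db41911f";

        FileSystem fs = FileSystem.get(URI.create(root), new Configuration());
        Path src = new Path(root + "/" + relative);
        Path dst = new Path(root + "/archive/" + relative);

        fs.mkdirs(dst.getParent());              // mirror the directory layout under archive/
        if (!fs.rename(src, dst)) {              // move, do not copy: same filesystem
            throw new IOException("could not archive " + src + " to " + dst);
        }
        System.out.println("archived " + src + " -> " + dst);
    }
}
```

Keeping archived files in a parallel tree rather than deleting them is what lets snapshots and cleaners decide later whether the data is still referenced.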
2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. after waiting 0 ms 2024-12-09T05:18:43,825 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/hbase/namespace/4c92af42adee675c0deec3b68367073c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T05:18:43,829 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4c92af42adee675c0deec3b68367073c: 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733721446523.4c92af42adee675c0deec3b68367073c. 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 35e9bfca82c3f49a16c93d34a671827f, disabling compactions & flushes 2024-12-09T05:18:43,829 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. after waiting 0 ms 2024-12-09T05:18:43,829 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 
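Each region close above follows the same narrated sequence: disable compactions and flushes, wait for the close lock, disable updates, then write the recovered.edits/<seqid>.seqid marker and report the region closed. The following is a generic sketch of the lock discipline those messages imply, assuming a simple read/write-lock split between writers and the closer; it is illustrative only, not HRegion's actual code.

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Generic illustration of the close protocol narrated above: writers take the read
// side of a close lock, the closer takes the write side, so "Acquired close lock ...
// after waiting 0 ms" suggests no writes were still in flight. Not HRegion's code.
public class RegionCloseLockSketch {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean closing = false;

    void put(byte[] edit) {
        closeLock.readLock().lock();             // many writers may hold this at once
        try {
            if (closing) {
                throw new IllegalStateException("region is closing, updates disabled");
            }
            // ... apply the edit to the memstore ...
        } finally {
            closeLock.readLock().unlock();
        }
    }

    void close() {
        closing = true;                          // "Updates disabled for region ..."
        closeLock.writeLock().lock();            // waits for in-flight writers to drain
        try {
            // ... flush remaining memstore data, archive compacted files,
            // write the recovered.edits/<seqid>.seqid marker, mark the region closed ...
        } finally {
            closeLock.writeLock().unlock();
        }
    }
}
```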
2024-12-09T05:18:43,830 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c->hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/850d12d27f942676099898eccdc0c98c/info/7618143e5ddf4f6daaa0ae81a28bba14-bottom] to archive 2024-12-09T05:18:43,830 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T05:18:43,832 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c to hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/archive/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/info/7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c 2024-12-09T05:18:43,835 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/data/default/TestLogRolling-testLogRolling/35e9bfca82c3f49a16c93d34a671827f/recovered.edits/128.seqid, newMaxSeqId=128, maxSeqId=123 2024-12-09T05:18:43,835 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:18:43,835 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 35e9bfca82c3f49a16c93d34a671827f: 2024-12-09T05:18:43,835 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733721471271.35e9bfca82c3f49a16c93d34a671827f. 2024-12-09T05:18:43,995 INFO [regionserver/41a709354867:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:18:43,996 INFO [regionserver/41a709354867:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T05:18:43,996 INFO [regionserver/41a709354867:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T05:18:43,999 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,39913,1733721445744; all regions closed. 
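The archived file 7618143e5ddf4f6daaa0ae81a28bba14.850d12d27f942676099898eccdc0c98c, resolved to "-top" in one daughter and "-bottom" in the other, shows that after the parent region split at row0062 each daughter initially served its half of the parent's HFile through a reference file named "<parent hfile>.<parent encoded region>". The sketch below illustrates what such a half reference means for reads; the range check is a simplification and not HBase's actual reference-file handling.

```java
import java.util.Arrays;

// What the "-top"/"-bottom" entries above represent: each daughter of the split at
// row0062 reads only its half of the parent's HFile through a reference file until a
// compaction rewrites the data locally. Simplified illustration, not HBase internals.
public class HalfReferenceSketch {
    enum Range { TOP, BOTTOM }

    record Reference(String parentHFile, String parentRegion, byte[] splitRow, Range range) {
        boolean contains(byte[] row) {
            int cmp = Arrays.compare(row, splitRow);     // lexicographic; ASCII rows here
            return range == Range.TOP ? cmp >= 0 : cmp < 0;
        }
        String fileName() {
            return parentHFile + "." + parentRegion;     // as seen in the archived paths
        }
    }

    public static void main(String[] args) {
        byte[] splitRow = "row0062".getBytes();
        Reference top = new Reference("7618143e5ddf4f6daaa0ae81a28bba14",
                "850d12d27f942676099898eccdc0c98c", splitRow, Range.TOP);
        // row0070 sorts after the split row, so the top half serves it.
        System.out.println(top.fileName() + " serves row0070? "
                + top.contains("row0070".getBytes()));
    }
}
```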
2024-12-09T05:18:43,999 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744 2024-12-09T05:18:44,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741834_1010 (size=9351) 2024-12-09T05:18:44,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741834_1010 (size=9351) 2024-12-09T05:18:44,003 DEBUG [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/oldWALs 2024-12-09T05:18:44,003 INFO [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C39913%2C1733721445744.meta:.meta(num 1733721446479) 2024-12-09T05:18:44,004 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/WALs/41a709354867,39913,1733721445744 2024-12-09T05:18:44,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741876_1052 (size=1072) 2024-12-09T05:18:44,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741876_1052 (size=1072) 2024-12-09T05:18:44,007 DEBUG [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/oldWALs 2024-12-09T05:18:44,007 INFO [RS:0;41a709354867:39913 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C39913%2C1733721445744:(num 1733721523688) 2024-12-09T05:18:44,007 DEBUG [RS:0;41a709354867:39913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:44,007 INFO [RS:0;41a709354867:39913 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:18:44,008 INFO [RS:0;41a709354867:39913 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-09T05:18:44,008 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:18:44,008 INFO [RS:0;41a709354867:39913 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39913 2024-12-09T05:18:44,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,39913,1733721445744 2024-12-09T05:18:44,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:18:44,010 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. 
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$364/0x00007fdbe08f58a0@16212f79 rejected from java.util.concurrent.ThreadPoolExecutor@5323a3d9[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-09T05:18:44,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,39913,1733721445744] 2024-12-09T05:18:44,011 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,39913,1733721445744; numProcessing=1 2024-12-09T05:18:44,012 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,39913,1733721445744 already deleted, retry=false 2024-12-09T05:18:44,012 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,39913,1733721445744 expired; onlineServers=0 2024-12-09T05:18:44,012 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,35729,1733721445690' ***** 2024-12-09T05:18:44,012 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:18:44,013 DEBUG [M:0;41a709354867:35729 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e620377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:18:44,013 INFO [M:0;41a709354867:35729 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,35729,1733721445690 2024-12-09T05:18:44,013 INFO [M:0;41a709354867:35729 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,35729,1733721445690; all regions closed. 2024-12-09T05:18:44,013 DEBUG [M:0;41a709354867:35729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:44,013 DEBUG [M:0;41a709354867:35729 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:18:44,013 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
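The ERROR from the ZooKeeper event thread above is a benign shutdown race rather than data loss: per the stack trace, ZKWatcher.process hands the final watch event to an executor that has already terminated, so the ThreadPoolExecutor rejects the task. A minimal reproduction of that condition follows, together with the usual mitigation of treating rejection during shutdown as non-fatal; it is a sketch of the failure mode, not of HBase's handling.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

// Minimal reproduction of the ERROR above: a last watch event is submitted to an
// executor that has already been shut down, so the task is rejected. Treating the
// rejection as benign during shutdown is the usual mitigation.
public class LateWatchEventSketch {
    public static void main(String[] args) {
        ExecutorService eventPool = Executors.newSingleThreadExecutor();
        eventPool.shutdownNow();                         // the process is stopping

        Runnable lateEvent = () -> System.out.println("process NodeDeleted /hbase/rs/...");
        try {
            eventPool.execute(lateEvent);                // same condition as in the log
        } catch (RejectedExecutionException e) {
            if (eventPool.isShutdown()) {
                // expected during shutdown: drop the event and log quietly, not at ERROR
                System.out.println("dropping late ZooKeeper event: " + e);
            } else {
                throw e;                                 // genuine saturation, rethrow
            }
        }
    }
}
```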
2024-12-09T05:18:44,013 DEBUG [M:0;41a709354867:35729 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:18:44,013 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721445886 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721445886,5,FailOnTimeoutGroup] 2024-12-09T05:18:44,013 INFO [M:0;41a709354867:35729 {}] hbase.ChoreService(370): Chore service for: master/41a709354867:0 had [] on shutdown 2024-12-09T05:18:44,013 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721445886 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721445886,5,FailOnTimeoutGroup] 2024-12-09T05:18:44,013 DEBUG [M:0;41a709354867:35729 {}] master.HMaster(1733): Stopping service threads 2024-12-09T05:18:44,013 INFO [M:0;41a709354867:35729 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:18:44,013 INFO [M:0;41a709354867:35729 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:18:44,013 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T05:18:44,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:18:44,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:44,014 DEBUG [M:0;41a709354867:35729 {}] zookeeper.ZKUtil(347): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:18:44,014 WARN [M:0;41a709354867:35729 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:18:44,014 INFO [M:0;41a709354867:35729 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T05:18:44,014 INFO [M:0;41a709354867:35729 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:18:44,014 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:18:44,014 INFO [M:0;41a709354867:35729 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:44,014 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:18:44,014 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:44,014 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T05:18:44,014 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:44,014 INFO [M:0;41a709354867:35729 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.45 KB heapSize=81.70 KB 2024-12-09T05:18:44,030 DEBUG [M:0;41a709354867:35729 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7b3c6e53b67b4d23a1b5c7a91249003d is 82, key is hbase:meta,,1/info:regioninfo/1733721446503/Put/seqid=0 2024-12-09T05:18:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741877_1053 (size=5672) 2024-12-09T05:18:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741877_1053 (size=5672) 2024-12-09T05:18:44,035 INFO [M:0;41a709354867:35729 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7b3c6e53b67b4d23a1b5c7a91249003d 2024-12-09T05:18:44,054 DEBUG [M:0;41a709354867:35729 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2aab93c7675b472c9b472a3c387d00a7 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733721447438/Put/seqid=0 2024-12-09T05:18:44,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741878_1054 (size=7286) 2024-12-09T05:18:44,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741878_1054 (size=7286) 2024-12-09T05:18:44,059 INFO [M:0;41a709354867:35729 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2aab93c7675b472c9b472a3c387d00a7 2024-12-09T05:18:44,063 INFO [M:0;41a709354867:35729 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2aab93c7675b472c9b472a3c387d00a7 2024-12-09T05:18:44,077 DEBUG [M:0;41a709354867:35729 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8144cc52f56a4508a29f8f36e6b769ea is 69, key is 41a709354867,39913,1733721445744/rs:state/1733721445981/Put/seqid=0 2024-12-09T05:18:44,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741879_1055 (size=5156) 2024-12-09T05:18:44,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741879_1055 (size=5156) 2024-12-09T05:18:44,082 INFO [M:0;41a709354867:35729 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8144cc52f56a4508a29f8f36e6b769ea 2024-12-09T05:18:44,100 DEBUG [M:0;41a709354867:35729 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a022dd2381574bb7b2f021fb6507132b is 52, key is load_balancer_on/state:d/1733721447069/Put/seqid=0 2024-12-09T05:18:44,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741880_1056 (size=5056) 2024-12-09T05:18:44,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741880_1056 (size=5056) 2024-12-09T05:18:44,105 INFO [M:0;41a709354867:35729 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a022dd2381574bb7b2f021fb6507132b 2024-12-09T05:18:44,109 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7b3c6e53b67b4d23a1b5c7a91249003d as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7b3c6e53b67b4d23a1b5c7a91249003d 2024-12-09T05:18:44,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:44,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39913-0x10075358e130001, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:44,111 INFO [RS:0;41a709354867:39913 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,39913,1733721445744; zookeeper connection closed. 
2024-12-09T05:18:44,111 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a061756 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a061756 2024-12-09T05:18:44,112 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T05:18:44,113 INFO [M:0;41a709354867:35729 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7b3c6e53b67b4d23a1b5c7a91249003d, entries=8, sequenceid=164, filesize=5.5 K 2024-12-09T05:18:44,114 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2aab93c7675b472c9b472a3c387d00a7 as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2aab93c7675b472c9b472a3c387d00a7 2024-12-09T05:18:44,118 INFO [M:0;41a709354867:35729 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2aab93c7675b472c9b472a3c387d00a7 2024-12-09T05:18:44,118 INFO [M:0;41a709354867:35729 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2aab93c7675b472c9b472a3c387d00a7, entries=18, sequenceid=164, filesize=7.1 K 2024-12-09T05:18:44,118 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8144cc52f56a4508a29f8f36e6b769ea as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8144cc52f56a4508a29f8f36e6b769ea 2024-12-09T05:18:44,123 INFO [M:0;41a709354867:35729 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8144cc52f56a4508a29f8f36e6b769ea, entries=1, sequenceid=164, filesize=5.0 K 2024-12-09T05:18:44,123 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a022dd2381574bb7b2f021fb6507132b as hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a022dd2381574bb7b2f021fb6507132b 2024-12-09T05:18:44,127 INFO [M:0;41a709354867:35729 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39619/user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a022dd2381574bb7b2f021fb6507132b, entries=1, sequenceid=164, filesize=4.9 K 2024-12-09T05:18:44,128 INFO [M:0;41a709354867:35729 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.45 KB/68043, heapSize ~81.63 KB/83592, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=164, compaction 
requested=false 2024-12-09T05:18:44,130 INFO [M:0;41a709354867:35729 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:44,130 DEBUG [M:0;41a709354867:35729 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:18:44,130 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/61e27ef4-8733-32b5-ead9-42d7283b996a/MasterData/WALs/41a709354867,35729,1733721445690 2024-12-09T05:18:44,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40731 is added to blk_1073741830_1006 (size=79272) 2024-12-09T05:18:44,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45159 is added to blk_1073741830_1006 (size=79272) 2024-12-09T05:18:44,132 INFO [M:0;41a709354867:35729 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-09T05:18:44,132 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:18:44,132 INFO [M:0;41a709354867:35729 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35729 2024-12-09T05:18:44,134 DEBUG [M:0;41a709354867:35729 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/41a709354867,35729,1733721445690 already deleted, retry=false 2024-12-09T05:18:44,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T05:18:44,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:44,235 INFO [M:0;41a709354867:35729 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,35729,1733721445690; zookeeper connection closed. 2024-12-09T05:18:44,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35729-0x10075358e130000, quorum=127.0.0.1:59807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:44,237 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@541e9d76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:18:44,238 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f24f78c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:18:44,238 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:18:44,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53e8a72b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:18:44,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@379b4071{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir/,STOPPED} 2024-12-09T05:18:44,240 WARN [BP-1944428553-172.17.0.2-1733721444988 heartbeating to localhost/127.0.0.1:39619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:18:44,240 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
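
The RecoverLeaseFSUtils warning above has a similar shape on the HDFS side: the lease-recovery helper probes isFileClosed through reflection, the DFS client has already been closed, and the real "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException whose own message is null. A small stand-alone sketch of how a cause gets wrapped and unwrapped across a reflective call (illustrative only; the isFileClosed stand-in below is a hypothetical local method, not the Hadoop API):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustrative only: the interesting exception is the cause, not the wrapper.
public class WrappedReflectiveFailureDemo {
    // Hypothetical stand-in for a closed-filesystem probe.
    public static boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        Method m = WrappedReflectiveFailureDemo.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(null, "/some/wal/file");
        } catch (InvocationTargetException e) {
            System.out.println("wrapper message: " + e.getMessage()); // null, as in the log
            System.out.println("real cause: " + e.getCause());
        }
    }
}
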
2024-12-09T05:18:44,240 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:18:44,240 WARN [BP-1944428553-172.17.0.2-1733721444988 heartbeating to localhost/127.0.0.1:39619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1944428553-172.17.0.2-1733721444988 (Datanode Uuid 496194f9-519a-41a1-813a-ac149f2bb4c9) service to localhost/127.0.0.1:39619 2024-12-09T05:18:44,240 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data3/current/BP-1944428553-172.17.0.2-1733721444988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:44,240 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data4/current/BP-1944428553-172.17.0.2-1733721444988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:44,241 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:18:44,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bd654d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:18:44,243 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b3e8d2b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:18:44,243 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:18:44,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70ec77da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:18:44,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@575e60d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir/,STOPPED} 2024-12-09T05:18:44,245 WARN [BP-1944428553-172.17.0.2-1733721444988 heartbeating to localhost/127.0.0.1:39619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:18:44,245 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:18:44,245 WARN [BP-1944428553-172.17.0.2-1733721444988 heartbeating to localhost/127.0.0.1:39619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1944428553-172.17.0.2-1733721444988 (Datanode Uuid 5b212408-bc5f-40e6-97dd-12b7d946c782) service to localhost/127.0.0.1:39619 2024-12-09T05:18:44,245 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:18:44,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data1/current/BP-1944428553-172.17.0.2-1733721444988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:44,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/cluster_f0201350-2d61-1eab-6f3f-a5243f289acf/dfs/data/data2/current/BP-1944428553-172.17.0.2-1733721444988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:44,246 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:18:44,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@155b716c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:18:44,252 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@598161d8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:18:44,253 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:18:44,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d8a3285{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:18:44,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b90cefa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir/,STOPPED} 2024-12-09T05:18:44,260 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:18:44,287 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-09T05:18:44,297 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=124 (was 109) - Thread LEAK? -, OpenFileDescriptor=487 (was 464) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=125 (was 127), ProcessCount=11 (was 11), AvailableMemoryMB=7806 (was 7859) 2024-12-09T05:18:44,304 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=124, OpenFileDescriptor=487, MaxFileDescriptor=1048576, SystemLoadAverage=125, ProcessCount=11, AvailableMemoryMB=7806 2024-12-09T05:18:44,304 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:18:44,304 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.log.dir so I do NOT create it in target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e 2024-12-09T05:18:44,304 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0bbe08ae-cef3-ba7a-8c05-9d42db496e77/hadoop.tmp.dir so I do NOT create it in target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144, deleteOnExit=true 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/test.cache.data in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T05:18:44,305 DEBUG [Time-limited test {}] 
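
The ResourceChecker lines above bracket each test with before/after counts of threads, open file descriptors, system load, process count, and available memory, and flag a possible leak when the after value exceeds the before value. A rough sketch of the same before/after idea using only JDK management beans (illustrative only, not the HBase ResourceChecker implementation):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

// Illustrative only: compare live-thread counts around a piece of work
// and flag a possible leak, in the spirit of the ResourceChecker output.
public class SimpleResourceCheck {
    public static void main(String[] args) throws Exception {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        int before = threads.getThreadCount();

        // Simulated work that starts (and here also leaves running) a thread.
        Thread t = new Thread(() -> {
            try { Thread.sleep(200); } catch (InterruptedException ignored) { }
        });
        t.start();

        int after = threads.getThreadCount();
        System.out.printf("Thread=%d (was %d)%s%n", after, before,
            after > before ? " - Thread LEAK?" : "");
        t.join();
    }
}
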
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:18:44,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:18:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:18:44,319 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:18:44,400 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:18:44,405 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:18:44,407 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:18:44,407 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:18:44,407 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:18:44,408 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:18:44,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f05120e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:18:44,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@206bee66{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:18:44,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6424c570{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/java.io.tmpdir/jetty-localhost-37103-hadoop-hdfs-3_4_1-tests_jar-_-any-17556416187530473549/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:18:44,566 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14479e2e{HTTP/1.1, (http/1.1)}{localhost:37103} 2024-12-09T05:18:44,566 INFO [Time-limited test {}] server.Server(415): Started @366120ms 2024-12-09T05:18:44,579 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T05:18:44,636 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:18:44,640 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:18:44,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:18:44,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:18:44,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:18:44,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a725035{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:18:44,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@635c060d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:18:44,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53856e5b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/java.io.tmpdir/jetty-localhost-44573-hadoop-hdfs-3_4_1-tests_jar-_-any-3321288320016202201/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:18:44,757 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fd2934a{HTTP/1.1, (http/1.1)}{localhost:44573} 2024-12-09T05:18:44,757 INFO [Time-limited test {}] server.Server(415): Started @366311ms 2024-12-09T05:18:44,758 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:18:44,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:18:44,790 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:18:44,791 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:18:44,791 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:18:44,791 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:18:44,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22708c2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:18:44,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56cc2cd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:18:44,853 WARN [Thread-2205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data2/current/BP-1423372129-172.17.0.2-1733721524336/current, will proceed with Du for space computation calculation, 2024-12-09T05:18:44,853 WARN [Thread-2204 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data1/current/BP-1423372129-172.17.0.2-1733721524336/current, will proceed with Du for space computation calculation, 2024-12-09T05:18:44,876 WARN [Thread-2183 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:18:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ddfef3936e5776 with lease ID 0x4c1ee0ede770bda3: Processing first storage report for DS-acb28276-90a4-4c18-b11b-75ee55c36ae0 from datanode DatanodeRegistration(127.0.0.1:33683, datanodeUuid=8f9658a8-ae1c-446d-ae53-b9323fe3a2a0, infoPort=33539, infoSecurePort=0, ipcPort=33299, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336) 2024-12-09T05:18:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ddfef3936e5776 with lease ID 0x4c1ee0ede770bda3: from storage DS-acb28276-90a4-4c18-b11b-75ee55c36ae0 node DatanodeRegistration(127.0.0.1:33683, datanodeUuid=8f9658a8-ae1c-446d-ae53-b9323fe3a2a0, infoPort=33539, infoSecurePort=0, ipcPort=33299, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:18:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ddfef3936e5776 with lease ID 0x4c1ee0ede770bda3: Processing first storage report for DS-4e902da6-6902-4355-a2f2-551847220161 from datanode DatanodeRegistration(127.0.0.1:33683, datanodeUuid=8f9658a8-ae1c-446d-ae53-b9323fe3a2a0, infoPort=33539, infoSecurePort=0, ipcPort=33299, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336) 2024-12-09T05:18:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ddfef3936e5776 with lease ID 0x4c1ee0ede770bda3: from storage DS-4e902da6-6902-4355-a2f2-551847220161 node DatanodeRegistration(127.0.0.1:33683, datanodeUuid=8f9658a8-ae1c-446d-ae53-b9323fe3a2a0, infoPort=33539, infoSecurePort=0, ipcPort=33299, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:18:44,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4436e022{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/java.io.tmpdir/jetty-localhost-39789-hadoop-hdfs-3_4_1-tests_jar-_-any-4614362981135056584/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:18:44,914 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e2ee2b4{HTTP/1.1, (http/1.1)}{localhost:39789} 2024-12-09T05:18:44,914 INFO [Time-limited test {}] server.Server(415): Started @366468ms 2024-12-09T05:18:44,915 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T05:18:44,998 WARN [Thread-2230 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data3/current/BP-1423372129-172.17.0.2-1733721524336/current, will proceed with Du for space computation calculation, 2024-12-09T05:18:44,998 WARN [Thread-2231 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data4/current/BP-1423372129-172.17.0.2-1733721524336/current, will proceed with Du for space computation calculation, 2024-12-09T05:18:45,021 WARN [Thread-2219 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:18:45,023 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafe8dd5ec856426a with lease ID 0x4c1ee0ede770bda4: Processing first storage report for DS-c98e1210-186b-4956-b037-c86653ed2f4b from datanode DatanodeRegistration(127.0.0.1:36235, datanodeUuid=492aaf1f-1491-4c6f-ad30-5d83af665927, infoPort=41313, infoSecurePort=0, ipcPort=37007, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336) 2024-12-09T05:18:45,023 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafe8dd5ec856426a with lease ID 0x4c1ee0ede770bda4: from storage DS-c98e1210-186b-4956-b037-c86653ed2f4b node DatanodeRegistration(127.0.0.1:36235, datanodeUuid=492aaf1f-1491-4c6f-ad30-5d83af665927, infoPort=41313, infoSecurePort=0, ipcPort=37007, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:18:45,023 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafe8dd5ec856426a with lease ID 0x4c1ee0ede770bda4: Processing first storage report for DS-9c463495-07d0-46e7-a99b-5d1867569414 from datanode DatanodeRegistration(127.0.0.1:36235, datanodeUuid=492aaf1f-1491-4c6f-ad30-5d83af665927, infoPort=41313, infoSecurePort=0, ipcPort=37007, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336) 2024-12-09T05:18:45,023 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafe8dd5ec856426a with lease ID 0x4c1ee0ede770bda4: from storage DS-9c463495-07d0-46e7-a99b-5d1867569414 node DatanodeRegistration(127.0.0.1:36235, datanodeUuid=492aaf1f-1491-4c6f-ad30-5d83af665927, infoPort=41313, infoSecurePort=0, ipcPort=37007, storageInfo=lv=-57;cid=testClusterID;nsid=228820608;c=1733721524336), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:18:45,036 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e 2024-12-09T05:18:45,039 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/zookeeper_0, clientPort=54454, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:18:45,040 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=54454 2024-12-09T05:18:45,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:18:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:18:45,050 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933 with version=8 2024-12-09T05:18:45,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33833/user/jenkins/test-data/afbc9df1-978d-170f-af0e-cb62c5d5ad7f/hbase-staging 2024-12-09T05:18:45,052 INFO [Time-limited test {}] client.ConnectionUtils(129): master/41a709354867:0 server-side Connection retries=45 2024-12-09T05:18:45,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:18:45,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:18:45,052 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:18:45,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:18:45,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:18:45,052 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:18:45,052 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:18:45,053 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34021 2024-12-09T05:18:45,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,056 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34021 connecting to ZooKeeper ensemble=127.0.0.1:54454 2024-12-09T05:18:45,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340210x0, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:18:45,063 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34021-0x1007536c4140000 connected 2024-12-09T05:18:45,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:18:45,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:18:45,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:18:45,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34021 2024-12-09T05:18:45,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34021 2024-12-09T05:18:45,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34021 2024-12-09T05:18:45,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34021 2024-12-09T05:18:45,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34021 2024-12-09T05:18:45,078 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933, hbase.cluster.distributed=false 2024-12-09T05:18:45,093 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/41a709354867:0 server-side Connection retries=45 2024-12-09T05:18:45,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:18:45,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:18:45,094 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:18:45,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:18:45,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:18:45,094 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:18:45,094 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:18:45,095 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44067 2024-12-09T05:18:45,096 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:18:45,096 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:18:45,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,100 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44067 connecting to ZooKeeper ensemble=127.0.0.1:54454 2024-12-09T05:18:45,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440670x0, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:18:45,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44067-0x1007536c4140001 connected 2024-12-09T05:18:45,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:18:45,104 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:18:45,104 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:18:45,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44067 2024-12-09T05:18:45,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44067 2024-12-09T05:18:45,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44067 2024-12-09T05:18:45,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44067 2024-12-09T05:18:45,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44067 2024-12-09T05:18:45,115 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/41a709354867,34021,1733721525051 2024-12-09T05:18:45,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:18:45,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:18:45,117 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/41a709354867,34021,1733721525051 2024-12-09T05:18:45,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:18:45,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:18:45,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,120 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:18:45,121 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/41a709354867,34021,1733721525051 from backup master directory 2024-12-09T05:18:45,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/41a709354867,34021,1733721525051 2024-12-09T05:18:45,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:18:45,122 WARN [master/41a709354867:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
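[Note] The RpcExecutor entries above (default.FPBQ.Fifo with handlerCount=3, the priority.RWQ read/write split, replication and metaPriority pools) reflect the RPC handler settings of this mini-cluster. A minimal sketch, assuming standard configuration keys rather than the test's actual setup, of how such handler counts are typically configured:

    // Illustrative only, not part of the test above. The values mirror the log
    // (handlerCount=3, writeHandlers=1/readHandlers=2) but are assumptions here.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Total RPC handlers backing the default call queue.
            conf.setInt("hbase.regionserver.handler.count", 3);
            // Fraction of call-queue handlers reserved for reads when the
            // read/write queue split is enabled.
            conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
            System.out.println(conf.get("hbase.regionserver.handler.count"));
        }
    }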
2024-12-09T05:18:45,122 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=41a709354867,34021,1733721525051 2024-12-09T05:18:45,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:18:45,122 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:18:45,128 DEBUG [M:0;41a709354867:34021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;41a709354867:34021 2024-12-09T05:18:45,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:18:45,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:18:45,134 DEBUG [master/41a709354867:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/hbase.id with ID: 5363df38-0ee4-4ed7-a1b3-4eeaf4d3cca2 2024-12-09T05:18:45,144 INFO [master/41a709354867:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:45,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:18:45,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:18:45,158 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:18:45,159 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:18:45,159 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:18:45,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:18:45,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:18:45,165 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store 2024-12-09T05:18:45,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:18:45,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:18:45,171 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:18:45,171 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:18:45,171 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:45,171 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:45,171 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:18:45,171 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:45,171 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:45,171 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:18:45,172 WARN [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/.initializing 2024-12-09T05:18:45,172 DEBUG [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/WALs/41a709354867,34021,1733721525051 2024-12-09T05:18:45,174 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C34021%2C1733721525051, suffix=, logDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/WALs/41a709354867,34021,1733721525051, archiveDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/oldWALs, maxLogs=10 2024-12-09T05:18:45,175 INFO [master/41a709354867:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C34021%2C1733721525051.1733721525175 2024-12-09T05:18:45,179 INFO [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/WALs/41a709354867,34021,1733721525051/41a709354867%2C34021%2C1733721525051.1733721525175 2024-12-09T05:18:45,179 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41313:41313),(127.0.0.1/127.0.0.1:33539:33539)] 2024-12-09T05:18:45,179 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:18:45,179 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:18:45,179 DEBUG [master/41a709354867:0:becomeActiveMaster 
{}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,179 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:18:45,182 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:18:45,183 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:18:45,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:18:45,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:18:45,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:18:45,186 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:18:45,187 DEBUG 
[master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,187 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T05:18:45,188 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T05:18:45,189 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:18:45,190 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:18:45,193 DEBUG [master/41a709354867:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:18:45,193 INFO [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728743, jitterRate=-0.07335630059242249}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:18:45,194 DEBUG [master/41a709354867:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:18:45,194 INFO [master/41a709354867:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:18:45,197 DEBUG [master/41a709354867:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b369322, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:18:45,198 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-09T05:18:45,198 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:18:45,198 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:18:45,198 INFO [master/41a709354867:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
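[Note] The FlushLargeStoresPolicy entry above ("using region.getMemStoreFlushHeapSize/# of families (32.0 M)") is plain arithmetic over values already in the log; a small sketch of that fallback calculation:

    // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
    // region memstore flush size divided by the number of column families.
    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            long memstoreFlushSize = 134217728L; // 128 MB, "flushSize=134217728" in the log
            int columnFamilies = 4;              // master:store has info, proc, rs, state
            long lowerBound = memstoreFlushSize / columnFamilies;
            System.out.println(lowerBound);      // 33554432 = 32 MB, matching flushSizeLowerBound above
        }
    }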
2024-12-09T05:18:45,198 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T05:18:45,199 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-09T05:18:45,199 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:18:45,200 INFO [master/41a709354867:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T05:18:45,201 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:18:45,203 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:18:45,203 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:18:45,204 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:18:45,205 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:18:45,205 INFO [master/41a709354867:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:18:45,206 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:18:45,208 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:18:45,208 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:18:45,210 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:18:45,211 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:18:45,212 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:18:45,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-09T05:18:45,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:18:45,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,214 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=41a709354867,34021,1733721525051, sessionid=0x1007536c4140000, setting cluster-up flag (Was=false) 2024-12-09T05:18:45,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,222 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:18:45,223 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,34021,1733721525051 2024-12-09T05:18:45,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,231 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:18:45,232 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=41a709354867,34021,1733721525051 2024-12-09T05:18:45,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:45,234 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T05:18:45,235 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T05:18:45,235 INFO [master/41a709354867:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
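[Note] The "Failed invocation ... Filesystem closed" trace above comes from RecoverLeaseFSUtils probing an old WAL after its DFSClient was already shut down; the WAL lease itself is not the problem. A hedged sketch (hypothetical path, not the test's code) of the lease-recovery pattern that utility implements via reflection:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Hypothetical WAL path; the real one in the log lives under /user/jenkins/test-data/...
            Path wal = new Path("hdfs://localhost:8020/hbase/WALs/example.wal");
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(wal.toUri(), conf);
            boolean recovered = dfs.recoverLease(wal);      // ask the NameNode to start lease recovery
            while (!recovered && !dfs.isFileClosed(wal)) {  // poll until the last block is finalized
                Thread.sleep(1000);
                recovered = dfs.recoverLease(wal);
            }
        }
    }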
2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 41a709354867,34021,1733721525051 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/41a709354867:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/41a709354867:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:18:45,235 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,236 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:18:45,236 DEBUG [master/41a709354867:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,236 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733721555236 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:18:45,237 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,237 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:18:45,237 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:18:45,238 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,238 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721525238,5,FailOnTimeoutGroup] 2024-12-09T05:18:45,238 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:18:45,238 DEBUG [master/41a709354867:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721525238,5,FailOnTimeoutGroup] 2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
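[Note] The hbase:meta descriptor logged above (info / rep_barrier / table families, ROW_INDEX_V1 encoding, ROWCOL blooms, in-memory, 8 KB blocks) can be expressed with the public TableDescriptorBuilder API. A sketch for illustration only; the table name is made up and the real bootstrap code builds this internally:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))          // hypothetical table
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                             // VERSIONS => '3'
                    .setInMemory(true)                             // IN_MEMORY => 'true'
                    .setBlocksize(8192)                            // BLOCKSIZE => '8192 B (8KB)'
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)          // BLOOMFILTER => 'ROWCOL'
                    .build())
                .build();
            System.out.println(td);
        }
    }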
2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,238 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:18:45,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741831_1007 (size=1039) 2024-12-09T05:18:45,249 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T05:18:45,249 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933 2024-12-09T05:18:45,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:18:45,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:18:45,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:18:45,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:18:45,260 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:18:45,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:18:45,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:18:45,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:18:45,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality 
to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:18:45,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740 2024-12-09T05:18:45,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740 2024-12-09T05:18:45,266 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T05:18:45,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:18:45,269 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:18:45,269 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718524, jitterRate=-0.08634988963603973}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:18:45,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:18:45,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:18:45,270 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:18:45,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:18:45,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:18:45,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:18:45,270 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:18:45,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:18:45,271 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:18:45,271 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T05:18:45,271 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:18:45,272 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:18:45,273 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:18:45,327 DEBUG [RS:0;41a709354867:44067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;41a709354867:44067 2024-12-09T05:18:45,327 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1008): ClusterId : 5363df38-0ee4-4ed7-a1b3-4eeaf4d3cca2 2024-12-09T05:18:45,328 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:18:45,330 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:18:45,330 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:18:45,332 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:18:45,332 DEBUG [RS:0;41a709354867:44067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e3a9d66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:18:45,332 DEBUG [RS:0;41a709354867:44067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21d35567, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:18:45,333 INFO [RS:0;41a709354867:44067 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T05:18:45,333 INFO [RS:0;41a709354867:44067 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T05:18:45,333 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-09T05:18:45,333 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(3073): reportForDuty to master=41a709354867,34021,1733721525051 with isa=41a709354867/172.17.0.2:44067, startcode=1733721525093 2024-12-09T05:18:45,333 DEBUG [RS:0;41a709354867:44067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:18:45,335 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60015, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:18:45,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34021 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 41a709354867,44067,1733721525093 2024-12-09T05:18:45,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34021 {}] master.ServerManager(486): Registering regionserver=41a709354867,44067,1733721525093 2024-12-09T05:18:45,337 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933 2024-12-09T05:18:45,337 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38357 2024-12-09T05:18:45,337 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T05:18:45,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:18:45,339 DEBUG [RS:0;41a709354867:44067 {}] zookeeper.ZKUtil(111): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/41a709354867,44067,1733721525093 2024-12-09T05:18:45,339 WARN [RS:0;41a709354867:44067 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
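[Note] The registration sequence above (reportForDuty, ServerManager registering 41a709354867,44067, and the watcher set on /hbase/rs/...) rests on an ephemeral znode per region server. A minimal sketch with the plain ZooKeeper client, using placeholder connection string and paths; HBase itself goes through ZKUtil/ZKWatcher:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsEphemeralNodeSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {});
            // Region server side: an ephemeral znode that disappears automatically
            // if the session dies, which is how crashed servers are detected.
            zk.create("/hbase/rs/example-server,44067,1733721525093", new byte[0],
                      ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // Master side: watch the children of /hbase/rs for joining or lost servers.
            zk.getChildren("/hbase/rs", true).forEach(System.out::println);
            zk.close();
        }
    }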
2024-12-09T05:18:45,339 INFO [RS:0;41a709354867:44067 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:18:45,339 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093 2024-12-09T05:18:45,339 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [41a709354867,44067,1733721525093] 2024-12-09T05:18:45,342 DEBUG [RS:0;41a709354867:44067 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T05:18:45,342 INFO [RS:0;41a709354867:44067 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:18:45,343 INFO [RS:0;41a709354867:44067 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:18:45,344 INFO [RS:0;41a709354867:44067 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:18:45,344 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,344 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T05:18:45,345 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/41a709354867:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/41a709354867:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:18:45,345 DEBUG [RS:0;41a709354867:44067 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/41a709354867:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:18:45,346 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,346 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,346 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,346 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,346 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,44067,1733721525093-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
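The repeated "Chore ScheduledChore name=..., period=..., is enabled" lines above come from HBase's internal ChoreService/ScheduledChore scheduler. The sketch below only illustrates that pattern; ChoreService and ScheduledChore are internal (IA.Private) classes, and the chore name and 1000 ms period are made up to mirror the CompactionChecker entry above.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChorePatternSketch {

  // A trivial Stoppable, standing in for the region server's stop flag.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  // Named subclass so the ScheduledChore constructor is usable regardless of its access level.
  static final class ExampleChore extends ScheduledChore {
    ExampleChore(Stoppable stopper) {
      super("exampleChore", stopper, 1000); // period in ms, like CompactionChecker above
    }
    @Override protected void chore() {
      System.out.println("periodic work");
    }
  }

  public static void main(String[] args) {
    ChoreService service = new ChoreService("sketch");
    service.scheduleChore(new ExampleChore(new SimpleStopper()));
  }
}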
2024-12-09T05:18:45,360 INFO [RS:0;41a709354867:44067 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:18:45,360 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,44067,1733721525093-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,374 INFO [RS:0;41a709354867:44067 {}] regionserver.Replication(204): 41a709354867,44067,1733721525093 started 2024-12-09T05:18:45,374 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1767): Serving as 41a709354867,44067,1733721525093, RpcServer on 41a709354867/172.17.0.2:44067, sessionid=0x1007536c4140001 2024-12-09T05:18:45,374 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:18:45,374 DEBUG [RS:0;41a709354867:44067 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 41a709354867,44067,1733721525093 2024-12-09T05:18:45,374 DEBUG [RS:0;41a709354867:44067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,44067,1733721525093' 2024-12-09T05:18:45,374 DEBUG [RS:0;41a709354867:44067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 41a709354867,44067,1733721525093 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '41a709354867,44067,1733721525093' 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:18:45,375 DEBUG [RS:0;41a709354867:44067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:18:45,375 INFO [RS:0;41a709354867:44067 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:18:45,375 INFO [RS:0;41a709354867:44067 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:18:45,423 WARN [41a709354867:34021 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-09T05:18:45,477 INFO [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C44067%2C1733721525093, suffix=, logDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093, archiveDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs, maxLogs=32 2024-12-09T05:18:45,478 INFO [RS:0;41a709354867:44067 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C44067%2C1733721525093.1733721525478 2024-12-09T05:18:45,487 INFO [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093/41a709354867%2C44067%2C1733721525093.1733721525478 2024-12-09T05:18:45,487 DEBUG [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33539:33539),(127.0.0.1/127.0.0.1:41313:41313)] 2024-12-09T05:18:45,673 DEBUG [41a709354867:34021 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T05:18:45,673 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=41a709354867,44067,1733721525093 2024-12-09T05:18:45,674 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,44067,1733721525093, state=OPENING 2024-12-09T05:18:45,676 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:18:45,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:45,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=41a709354867,44067,1733721525093}] 2024-12-09T05:18:45,678 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:18:45,678 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:18:45,830 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 41a709354867,44067,1733721525093 2024-12-09T05:18:45,831 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:18:45,833 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:18:45,836 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T05:18:45,836 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:18:45,837 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=41a709354867%2C44067%2C1733721525093.meta, suffix=.meta, logDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093, archiveDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs, maxLogs=32 2024-12-09T05:18:45,838 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 41a709354867%2C44067%2C1733721525093.meta.1733721525838.meta 2024-12-09T05:18:45,843 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093/41a709354867%2C44067%2C1733721525093.meta.1733721525838.meta 2024-12-09T05:18:45,843 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41313:41313),(127.0.0.1/127.0.0.1:33539:33539)] 2024-12-09T05:18:45,843 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:18:45,843 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:18:45,843 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:18:45,843 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
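The MultiRowMutationEndpoint loaded above is attached through the hbase:meta table descriptor. For a user table, the equivalent client-side call looks roughly like the sketch below; the table name "demo" and family "info" are made up, and only the coprocessor class name is taken from the log.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static TableDescriptor build() throws IOException {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Same endpoint class the master loads for hbase:meta in the log above.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}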
2024-12-09T05:18:45,843 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:18:45,844 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:18:45,844 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T05:18:45,844 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T05:18:45,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:18:45,845 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:18:45,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:18:45,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:18:45,847 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:18:45,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:18:45,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:18:45,848 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740 2024-12-09T05:18:45,849 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740 2024-12-09T05:18:45,850 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
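The FlushLargeStoresPolicy message above looks for hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor and falls back to memstore-flush-size divided by the number of families (16 M here). A sketch of setting that property explicitly on a user table; the table name and the 16 MB value are illustrative only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Property name taken from the FlushLargeStoresPolicy log line above.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16 * 1024 * 1024))
        .build();
  }
}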
2024-12-09T05:18:45,851 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T05:18:45,852 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849150, jitterRate=0.07975123822689056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T05:18:45,852 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T05:18:45,853 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733721525830 2024-12-09T05:18:45,854 DEBUG [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:18:45,854 INFO [RS_OPEN_META-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T05:18:45,855 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,44067,1733721525093 2024-12-09T05:18:45,855 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 41a709354867,44067,1733721525093, state=OPEN 2024-12-09T05:18:45,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:18:45,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:18:45,862 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:18:45,862 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:18:45,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:18:45,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=41a709354867,44067,1733721525093 in 184 msec 2024-12-09T05:18:45,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:18:45,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 593 msec 2024-12-09T05:18:45,867 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 633 msec 2024-12-09T05:18:45,867 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733721525867, completionTime=-1 2024-12-09T05:18:45,867 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T05:18:45,867 DEBUG [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T05:18:45,868 DEBUG [hconnection-0x599829e2-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:18:45,869 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:18:45,870 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T05:18:45,870 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733721585870 2024-12-09T05:18:45,870 INFO [master/41a709354867:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733721645870 2024-12-09T05:18:45,870 INFO [master/41a709354867:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-09T05:18:45,875 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34021,1733721525051-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,875 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34021,1733721525051-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,875 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34021,1733721525051-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,876 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-41a709354867:34021, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,876 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:18:45,876 INFO [master/41a709354867:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-09T05:18:45,876 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:18:45,877 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T05:18:45,877 DEBUG [master/41a709354867:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T05:18:45,877 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:18:45,877 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:45,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:18:45,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:18:45,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741835_1011 (size=358) 2024-12-09T05:18:45,886 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4ab5a2d54ed9bb811d12558b4cc2c503, NAME => 'hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933 2024-12-09T05:18:45,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:18:45,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741836_1012 (size=42) 2024-12-09T05:18:45,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:18:45,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4ab5a2d54ed9bb811d12558b4cc2c503, disabling compactions & flushes 2024-12-09T05:18:45,895 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:45,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:45,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. after waiting 0 ms 2024-12-09T05:18:45,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:45,895 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:45,896 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4ab5a2d54ed9bb811d12558b4cc2c503: 2024-12-09T05:18:45,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:18:45,897 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733721525896"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733721525896"}]},"ts":"1733721525896"} 2024-12-09T05:18:45,898 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T05:18:45,899 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:18:45,899 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721525899"}]},"ts":"1733721525899"} 2024-12-09T05:18:45,900 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T05:18:45,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4ab5a2d54ed9bb811d12558b4cc2c503, ASSIGN}] 2024-12-09T05:18:45,905 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4ab5a2d54ed9bb811d12558b4cc2c503, ASSIGN 2024-12-09T05:18:45,906 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4ab5a2d54ed9bb811d12558b4cc2c503, ASSIGN; state=OFFLINE, location=41a709354867,44067,1733721525093; forceNewPlan=false, retain=false 2024-12-09T05:18:46,056 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4ab5a2d54ed9bb811d12558b4cc2c503, regionState=OPENING, regionLocation=41a709354867,44067,1733721525093 2024-12-09T05:18:46,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4ab5a2d54ed9bb811d12558b4cc2c503, server=41a709354867,44067,1733721525093}] 2024-12-09T05:18:46,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 41a709354867,44067,1733721525093 2024-12-09T05:18:46,214 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:46,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4ab5a2d54ed9bb811d12558b4cc2c503, NAME => 'hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:18:46,215 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,215 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:18:46,215 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,215 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,216 INFO [StoreOpener-4ab5a2d54ed9bb811d12558b4cc2c503-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,218 INFO [StoreOpener-4ab5a2d54ed9bb811d12558b4cc2c503-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ab5a2d54ed9bb811d12558b4cc2c503 columnFamilyName info 2024-12-09T05:18:46,218 DEBUG [StoreOpener-4ab5a2d54ed9bb811d12558b4cc2c503-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:18:46,218 INFO [StoreOpener-4ab5a2d54ed9bb811d12558b4cc2c503-1 {}] regionserver.HStore(327): Store=4ab5a2d54ed9bb811d12558b4cc2c503/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:18:46,219 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,219 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,221 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,223 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:18:46,223 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4ab5a2d54ed9bb811d12558b4cc2c503; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813389, jitterRate=0.03427855670452118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:18:46,223 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4ab5a2d54ed9bb811d12558b4cc2c503: 2024-12-09T05:18:46,224 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503., pid=6, masterSystemTime=1733721526211 2024-12-09T05:18:46,226 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:46,226 INFO [RS_OPEN_PRIORITY_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 
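The CreateTableProcedure above builds hbase:namespace from the descriptor printed at 05:18:45,876 (info family, VERSIONS 10, IN_MEMORY true, BLOOMFILTER ROW, BLOCKSIZE 8192, TTL FOREVER). hbase:namespace itself is created internally by the master; for a user table, the equivalent client-side call with the same column-family settings would look roughly like this, where the table name "demo" is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateNamespaceLikeTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(10)                // VERSIONS => '10'
              .setInMemory(true)                 // IN_MEMORY => 'true'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(8192)                // BLOCKSIZE => '8192'
              .setTimeToLive(HConstants.FOREVER) // TTL => 'FOREVER'
              .build())
          .build());
    }
  }
}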
2024-12-09T05:18:46,226 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4ab5a2d54ed9bb811d12558b4cc2c503, regionState=OPEN, openSeqNum=2, regionLocation=41a709354867,44067,1733721525093 2024-12-09T05:18:46,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:18:46,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4ab5a2d54ed9bb811d12558b4cc2c503, server=41a709354867,44067,1733721525093 in 169 msec 2024-12-09T05:18:46,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:18:46,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4ab5a2d54ed9bb811d12558b4cc2c503, ASSIGN in 325 msec 2024-12-09T05:18:46,231 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:18:46,231 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733721526231"}]},"ts":"1733721526231"} 2024-12-09T05:18:46,232 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T05:18:46,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45367/user/jenkins/test-data/47340205-b609-d2ac-a469-7c4ce5f9e691/WALs/41a709354867,36179,1733721277192/41a709354867%2C36179%2C1733721277192.meta.1733721277996.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor157.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T05:18:46,235 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:18:46,236 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 359 msec 2024-12-09T05:18:46,278 DEBUG [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T05:18:46,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:46,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:18:46,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:46,284 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T05:18:46,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:18:46,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 8 msec 2024-12-09T05:18:46,295 DEBUG [master/41a709354867:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T05:18:46,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T05:18:46,303 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 8 msec 2024-12-09T05:18:46,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T05:18:46,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed 
initialization 1.190sec 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34021,1733721525051-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:18:46,312 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34021,1733721525051-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:18:46,314 DEBUG [master/41a709354867:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:18:46,314 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:18:46,314 INFO [master/41a709354867:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=41a709354867,34021,1733721525051-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:18:46,317 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18965732 to 127.0.0.1:54454 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58e01c2d 2024-12-09T05:18:46,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d616caf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:18:46,321 DEBUG [hconnection-0x3d1e8041-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:18:46,322 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:18:46,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=41a709354867,34021,1733721525051 2024-12-09T05:18:46,324 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:18:46,326 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-09T05:18:46,326 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:18:46,328 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1, archiveDir=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs, maxLogs=32 2024-12-09T05:18:46,329 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733721526329 2024-12-09T05:18:46,335 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1/test.com%2C8080%2C1.1733721526329 2024-12-09T05:18:46,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41313:41313),(127.0.0.1/127.0.0.1:33539:33539)] 2024-12-09T05:18:46,336 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733721526336 2024-12-09T05:18:46,341 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1/test.com%2C8080%2C1.1733721526329 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1/test.com%2C8080%2C1.1733721526336 2024-12-09T05:18:46,341 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33539:33539),(127.0.0.1/127.0.0.1:41313:41313)] 2024-12-09T05:18:46,341 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1/test.com%2C8080%2C1.1733721526329 is not closed yet, will try archiving it next time 2024-12-09T05:18:46,342 DEBUG [WAL-Shutdown-0 {}] 
wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1 2024-12-09T05:18:46,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741837_1013 (size=93) 2024-12-09T05:18:46,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741837_1013 (size=93) 2024-12-09T05:18:46,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741838_1014 (size=93) 2024-12-09T05:18:46,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741838_1014 (size=93) 2024-12-09T05:18:46,344 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/test.com,8080,1/test.com%2C8080%2C1.1733721526329 to hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs/test.com%2C8080%2C1.1733721526329 2024-12-09T05:18:46,346 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs 2024-12-09T05:18:46,346 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733721526336) 2024-12-09T05:18:46,346 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T05:18:46,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18965732 to 127.0.0.1:54454 2024-12-09T05:18:46,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:46,346 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:18:46,346 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1584521031, stopped=false 2024-12-09T05:18:46,347 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=41a709354867,34021,1733721525051 2024-12-09T05:18:46,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:18:46,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:46,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:18:46,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:46,349 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T05:18:46,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
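The "Minicluster is up" and "Shutting down minicluster" lines above come from the standard HBaseTestingUtility test scaffolding; a minimal sketch of that lifecycle, with the test body omitted and purely illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();      // brings up ZK, a mini-DFS, one master, one region server
    try {
      // ... exercise WALs and regions against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster(); // produces the "Shutting down minicluster" line above
    }
  }
}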
2024-12-09T05:18:46,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:46,350 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,44067,1733721525093' ***** 2024-12-09T05:18:46,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:18:46,350 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T05:18:46,350 INFO [RS:0;41a709354867:44067 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:18:46,350 INFO [RS:0;41a709354867:44067 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:18:46,350 INFO [RS:0;41a709354867:44067 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:18:46,350 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T05:18:46,350 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(3579): Received CLOSE for 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,351 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,44067,1733721525093 2024-12-09T05:18:46,351 DEBUG [RS:0;41a709354867:44067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:46,351 INFO [RS:0;41a709354867:44067 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:18:46,351 INFO [RS:0;41a709354867:44067 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:18:46,351 INFO [RS:0;41a709354867:44067 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:18:46,351 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4ab5a2d54ed9bb811d12558b4cc2c503, disabling compactions & flushes 2024-12-09T05:18:46,351 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T05:18:46,351 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:46,351 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:46,351 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. after waiting 0 ms 2024-12-09T05:18:46,351 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 
2024-12-09T05:18:46,351 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4ab5a2d54ed9bb811d12558b4cc2c503 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T05:18:46,351 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-09T05:18:46,351 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 4ab5a2d54ed9bb811d12558b4cc2c503=hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503.} 2024-12-09T05:18:46,351 DEBUG [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4ab5a2d54ed9bb811d12558b4cc2c503 2024-12-09T05:18:46,351 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:18:46,352 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T05:18:46,352 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T05:18:46,352 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:18:46,352 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:18:46,352 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB 2024-12-09T05:18:46,367 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/.tmp/info/52242b10f9eb45a5acdb60dd5b589004 is 143, key is hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503./info:regioninfo/1733721526226/Put/seqid=0 2024-12-09T05:18:46,367 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/.tmp/info/fd91f9f43ce64f249a6dc2cc0bec1d5d is 45, key is default/info:d/1733721526287/Put/seqid=0 2024-12-09T05:18:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741840_1016 (size=5037) 2024-12-09T05:18:46,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741839_1015 (size=6595) 2024-12-09T05:18:46,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741839_1015 (size=6595) 2024-12-09T05:18:46,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741840_1016 (size=5037) 2024-12-09T05:18:46,379 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/.tmp/info/52242b10f9eb45a5acdb60dd5b589004 2024-12-09T05:18:46,379 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/.tmp/info/fd91f9f43ce64f249a6dc2cc0bec1d5d 2024-12-09T05:18:46,384 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/.tmp/info/fd91f9f43ce64f249a6dc2cc0bec1d5d as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/info/fd91f9f43ce64f249a6dc2cc0bec1d5d 2024-12-09T05:18:46,389 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/info/fd91f9f43ce64f249a6dc2cc0bec1d5d, entries=2, sequenceid=6, filesize=4.9 K 2024-12-09T05:18:46,390 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4ab5a2d54ed9bb811d12558b4cc2c503 in 39ms, sequenceid=6, compaction requested=false 2024-12-09T05:18:46,390 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-09T05:18:46,394 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/namespace/4ab5a2d54ed9bb811d12558b4cc2c503/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T05:18:46,395 INFO [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 2024-12-09T05:18:46,395 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4ab5a2d54ed9bb811d12558b4cc2c503: 2024-12-09T05:18:46,395 DEBUG [RS_CLOSE_REGION-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733721525876.4ab5a2d54ed9bb811d12558b4cc2c503. 
2024-12-09T05:18:46,400 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/.tmp/table/dcf214180f054f3f9c73278f95805168 is 51, key is hbase:namespace/table:state/1733721526231/Put/seqid=0 2024-12-09T05:18:46,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741841_1017 (size=5242) 2024-12-09T05:18:46,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741841_1017 (size=5242) 2024-12-09T05:18:46,405 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/.tmp/table/dcf214180f054f3f9c73278f95805168 2024-12-09T05:18:46,410 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/.tmp/info/52242b10f9eb45a5acdb60dd5b589004 as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/info/52242b10f9eb45a5acdb60dd5b589004 2024-12-09T05:18:46,414 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/info/52242b10f9eb45a5acdb60dd5b589004, entries=10, sequenceid=9, filesize=6.4 K 2024-12-09T05:18:46,415 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/.tmp/table/dcf214180f054f3f9c73278f95805168 as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/table/dcf214180f054f3f9c73278f95805168 2024-12-09T05:18:46,419 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/table/dcf214180f054f3f9c73278f95805168, entries=2, sequenceid=9, filesize=5.1 K 2024-12-09T05:18:46,420 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 68ms, sequenceid=9, compaction requested=false 2024-12-09T05:18:46,420 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T05:18:46,424 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-09T05:18:46,425 DEBUG 
[RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T05:18:46,425 INFO [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T05:18:46,425 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T05:18:46,425 DEBUG [RS_CLOSE_META-regionserver/41a709354867:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T05:18:46,552 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,44067,1733721525093; all regions closed. 2024-12-09T05:18:46,552 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093 2024-12-09T05:18:46,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741834_1010 (size=2484) 2024-12-09T05:18:46,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741834_1010 (size=2484) 2024-12-09T05:18:46,556 DEBUG [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs 2024-12-09T05:18:46,556 INFO [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C44067%2C1733721525093.meta:.meta(num 1733721525838) 2024-12-09T05:18:46,557 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/WALs/41a709354867,44067,1733721525093 2024-12-09T05:18:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741833_1009 (size=1414) 2024-12-09T05:18:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741833_1009 (size=1414) 2024-12-09T05:18:46,560 DEBUG [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/oldWALs 2024-12-09T05:18:46,560 INFO [RS:0;41a709354867:44067 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 41a709354867%2C44067%2C1733721525093:(num 1733721525478) 2024-12-09T05:18:46,560 DEBUG [RS:0;41a709354867:44067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:46,560 INFO [RS:0;41a709354867:44067 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:18:46,560 INFO [RS:0;41a709354867:44067 {}] hbase.ChoreService(370): Chore service for: regionserver/41a709354867:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T05:18:46,560 INFO [regionserver/41a709354867:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-09T05:18:46,561 INFO [RS:0;41a709354867:44067 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44067 2024-12-09T05:18:46,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/41a709354867,44067,1733721525093 2024-12-09T05:18:46,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:18:46,564 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [41a709354867,44067,1733721525093] 2024-12-09T05:18:46,564 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 41a709354867,44067,1733721525093; numProcessing=1 2024-12-09T05:18:46,566 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/41a709354867,44067,1733721525093 already deleted, retry=false 2024-12-09T05:18:46,566 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 41a709354867,44067,1733721525093 expired; onlineServers=0 2024-12-09T05:18:46,566 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '41a709354867,34021,1733721525051' ***** 2024-12-09T05:18:46,566 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:18:46,566 DEBUG [M:0;41a709354867:34021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5095fe31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=41a709354867/172.17.0.2:0 2024-12-09T05:18:46,566 INFO [M:0;41a709354867:34021 {}] regionserver.HRegionServer(1224): stopping server 41a709354867,34021,1733721525051 2024-12-09T05:18:46,566 INFO [M:0;41a709354867:34021 {}] regionserver.HRegionServer(1250): stopping server 41a709354867,34021,1733721525051; all regions closed. 2024-12-09T05:18:46,566 DEBUG [M:0;41a709354867:34021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:18:46,566 DEBUG [M:0;41a709354867:34021 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:18:46,567 DEBUG [M:0;41a709354867:34021 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:18:46,567 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T05:18:46,567 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721525238 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.small.0-1733721525238,5,FailOnTimeoutGroup] 2024-12-09T05:18:46,567 DEBUG [master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721525238 {}] cleaner.HFileCleaner(306): Exit Thread[master/41a709354867:0:becomeActiveMaster-HFileCleaner.large.0-1733721525238,5,FailOnTimeoutGroup] 2024-12-09T05:18:46,567 INFO [M:0;41a709354867:34021 {}] hbase.ChoreService(370): Chore service for: master/41a709354867:0 had [] on shutdown 2024-12-09T05:18:46,567 DEBUG [M:0;41a709354867:34021 {}] master.HMaster(1733): Stopping service threads 2024-12-09T05:18:46,567 INFO [M:0;41a709354867:34021 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:18:46,567 INFO [M:0;41a709354867:34021 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:18:46,567 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T05:18:46,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:18:46,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:18:46,568 DEBUG [M:0;41a709354867:34021 {}] zookeeper.ZKUtil(347): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:18:46,568 WARN [M:0;41a709354867:34021 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:18:46,568 INFO [M:0;41a709354867:34021 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T05:18:46,568 INFO [M:0;41a709354867:34021 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:18:46,568 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:18:46,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:18:46,568 INFO [M:0;41a709354867:34021 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:46,568 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:46,568 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:18:46,568 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T05:18:46,569 INFO [M:0;41a709354867:34021 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB 2024-12-09T05:18:46,584 DEBUG [M:0;41a709354867:34021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1440816da03e4c37bab04fb48a710b2d is 82, key is hbase:meta,,1/info:regioninfo/1733721525855/Put/seqid=0 2024-12-09T05:18:46,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741842_1018 (size=5672) 2024-12-09T05:18:46,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741842_1018 (size=5672) 2024-12-09T05:18:46,589 INFO [M:0;41a709354867:34021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1440816da03e4c37bab04fb48a710b2d 2024-12-09T05:18:46,607 DEBUG [M:0;41a709354867:34021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab76ea249eda49f7b492f79480d6cff4 is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733721526235/Put/seqid=0 2024-12-09T05:18:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741843_1019 (size=6626) 2024-12-09T05:18:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741843_1019 (size=6626) 2024-12-09T05:18:46,613 INFO [M:0;41a709354867:34021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab76ea249eda49f7b492f79480d6cff4 2024-12-09T05:18:46,631 DEBUG [M:0;41a709354867:34021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3b06270aa1240eeb07f738477a2f84f is 69, key is 41a709354867,44067,1733721525093/rs:state/1733721525336/Put/seqid=0 2024-12-09T05:18:46,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741844_1020 (size=5156) 2024-12-09T05:18:46,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741844_1020 (size=5156) 2024-12-09T05:18:46,640 INFO [M:0;41a709354867:34021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3b06270aa1240eeb07f738477a2f84f 2024-12-09T05:18:46,658 DEBUG [M:0;41a709354867:34021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fc94e19807f04e6283acfe3777d88324 is 52, key is load_balancer_on/state:d/1733721526325/Put/seqid=0 2024-12-09T05:18:46,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741845_1021 (size=5056) 2024-12-09T05:18:46,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741845_1021 (size=5056) 2024-12-09T05:18:46,663 INFO [M:0;41a709354867:34021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fc94e19807f04e6283acfe3777d88324 2024-12-09T05:18:46,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:46,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44067-0x1007536c4140001, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:46,664 INFO [RS:0;41a709354867:44067 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,44067,1733721525093; zookeeper connection closed. 2024-12-09T05:18:46,664 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@76d7a4a9 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@76d7a4a9 2024-12-09T05:18:46,665 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T05:18:46,668 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1440816da03e4c37bab04fb48a710b2d as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1440816da03e4c37bab04fb48a710b2d 2024-12-09T05:18:46,672 INFO [M:0;41a709354867:34021 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1440816da03e4c37bab04fb48a710b2d, entries=8, sequenceid=70, filesize=5.5 K 2024-12-09T05:18:46,673 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab76ea249eda49f7b492f79480d6cff4 as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab76ea249eda49f7b492f79480d6cff4 2024-12-09T05:18:46,677 INFO [M:0;41a709354867:34021 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab76ea249eda49f7b492f79480d6cff4, entries=8, 
sequenceid=70, filesize=6.5 K 2024-12-09T05:18:46,677 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3b06270aa1240eeb07f738477a2f84f as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e3b06270aa1240eeb07f738477a2f84f 2024-12-09T05:18:46,681 INFO [M:0;41a709354867:34021 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e3b06270aa1240eeb07f738477a2f84f, entries=1, sequenceid=70, filesize=5.0 K 2024-12-09T05:18:46,682 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fc94e19807f04e6283acfe3777d88324 as hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fc94e19807f04e6283acfe3777d88324 2024-12-09T05:18:46,686 INFO [M:0;41a709354867:34021 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38357/user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fc94e19807f04e6283acfe3777d88324, entries=1, sequenceid=70, filesize=4.9 K 2024-12-09T05:18:46,687 INFO [M:0;41a709354867:34021 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=70, compaction requested=false 2024-12-09T05:18:46,689 INFO [M:0;41a709354867:34021 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:18:46,689 DEBUG [M:0;41a709354867:34021 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T05:18:46,689 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/71f78105-590a-a54f-c4d5-fa4be600d933/MasterData/WALs/41a709354867,34021,1733721525051 2024-12-09T05:18:46,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33683 is added to blk_1073741830_1006 (size=31030) 2024-12-09T05:18:46,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36235 is added to blk_1073741830_1006 (size=31030) 2024-12-09T05:18:46,691 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T05:18:46,691 INFO [M:0;41a709354867:34021 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-09T05:18:46,692 INFO [M:0;41a709354867:34021 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34021 2024-12-09T05:18:46,694 DEBUG [M:0;41a709354867:34021 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/41a709354867,34021,1733721525051 already deleted, retry=false 2024-12-09T05:18:46,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:46,796 INFO [M:0;41a709354867:34021 {}] regionserver.HRegionServer(1307): Exiting; stopping=41a709354867,34021,1733721525051; zookeeper connection closed. 2024-12-09T05:18:46,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34021-0x1007536c4140000, quorum=127.0.0.1:54454, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:18:46,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4436e022{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:18:46,799 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e2ee2b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:18:46,799 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:18:46,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56cc2cd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:18:46,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22708c2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir/,STOPPED} 2024-12-09T05:18:46,800 WARN [BP-1423372129-172.17.0.2-1733721524336 heartbeating to localhost/127.0.0.1:38357 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:18:46,800 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:18:46,800 WARN [BP-1423372129-172.17.0.2-1733721524336 heartbeating to localhost/127.0.0.1:38357 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1423372129-172.17.0.2-1733721524336 (Datanode Uuid 492aaf1f-1491-4c6f-ad30-5d83af665927) service to localhost/127.0.0.1:38357 2024-12-09T05:18:46,800 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:18:46,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data3/current/BP-1423372129-172.17.0.2-1733721524336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:46,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data4/current/BP-1423372129-172.17.0.2-1733721524336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:46,801 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:18:46,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53856e5b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:18:46,804 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fd2934a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:18:46,804 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:18:46,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@635c060d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:18:46,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a725035{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir/,STOPPED} 2024-12-09T05:18:46,806 WARN [BP-1423372129-172.17.0.2-1733721524336 heartbeating to localhost/127.0.0.1:38357 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:18:46,807 WARN [BP-1423372129-172.17.0.2-1733721524336 heartbeating to localhost/127.0.0.1:38357 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1423372129-172.17.0.2-1733721524336 (Datanode Uuid 8f9658a8-ae1c-446d-ae53-b9323fe3a2a0) service to localhost/127.0.0.1:38357 2024-12-09T05:18:46,807 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:18:46,807 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:18:46,807 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data1/current/BP-1423372129-172.17.0.2-1733721524336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:46,807 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/cluster_17faa25c-b3ef-701d-69ee-01881b530144/dfs/data/data2/current/BP-1423372129-172.17.0.2-1733721524336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:18:46,808 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:18:46,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6424c570{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:18:46,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14479e2e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:18:46,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:18:46,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@206bee66{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:18:46,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f05120e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1e478ac5-1e0f-5730-778e-20b4acc9db9e/hadoop.log.dir/,STOPPED} 2024-12-09T05:18:46,819 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:18:46,834 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-09T05:18:46,842 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=147 (was 124) - Thread LEAK? -, OpenFileDescriptor=517 (was 487) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=125 (was 125), ProcessCount=11 (was 11), AvailableMemoryMB=7790 (was 7806)