2024-11-22 03:34:41,698 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 2024-11-22 03:34:41,715 main DEBUG Took 0.014046 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-22 03:34:41,716 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-22 03:34:41,716 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-22 03:34:41,718 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-22 03:34:41,721 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,731 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-22 03:34:41,748 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,751 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,752 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,753 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,754 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,756 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,756 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,758 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,758 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,759 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,759 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-22 03:34:41,760 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,762 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,763 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,763 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,765 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,766 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,766 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:34:41,767 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,767 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-22 03:34:41,770 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:34:41,773 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-22 03:34:41,778 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-22 03:34:41,779 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-22 03:34:41,781 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-22 03:34:41,782 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-22 03:34:41,798 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-22 03:34:41,802 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-22 03:34:41,804 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-22 03:34:41,805 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-22 03:34:41,806 main DEBUG createAppenders(={Console}) 2024-11-22 03:34:41,807 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 initialized 2024-11-22 03:34:41,807 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 2024-11-22 03:34:41,808 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 OK. 2024-11-22 03:34:41,809 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-22 03:34:41,809 main DEBUG OutputStream closed 2024-11-22 03:34:41,810 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-22 03:34:41,810 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-22 03:34:41,810 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@3e11f9e9 OK 2024-11-22 03:34:41,924 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-22 03:34:41,927 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-22 03:34:41,929 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-22 03:34:41,930 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-22 03:34:41,931 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-22 03:34:41,932 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-22 03:34:41,932 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-22 03:34:41,933 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-22 03:34:41,936 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-22 03:34:41,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-22 03:34:41,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-22 03:34:41,938 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-22 03:34:41,938 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-22 03:34:41,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-22 03:34:41,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-22 03:34:41,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-22 03:34:41,940 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-22 03:34:41,941 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-22 03:34:41,945 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22 03:34:41,945 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@21fd5faa) with optional ClassLoader: null 2024-11-22 03:34:41,946 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-22 03:34:41,947 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@21fd5faa] started OK. 2024-11-22T03:34:42,371 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8 2024-11-22 03:34:42,376 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-22 03:34:42,377 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
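The entries above show the Log4j 2 test configuration being assembled: a Console appender on SYSTEM_ERR with pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n (which is the format of every timestamped line that follows), a root logger at INFO, and per-package levels such as org.apache.hadoop.hbase=DEBUG, org.apache.hadoop=WARN and org.apache.zookeeper=ERROR. As a minimal sketch (not part of the test run; the class name LogLevelProbe is illustrative, and it assumes SLF4J backed by this Log4j 2 setup is on the classpath), the effective levels can be checked like this:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative probe of the logger levels configured in the entries above.
public class LogLevelProbe {
  public static void main(String[] args) {
    Logger hbase = LoggerFactory.getLogger("org.apache.hadoop.hbase");   // DEBUG above
    Logger hadoop = LoggerFactory.getLogger("org.apache.hadoop");        // WARN above
    Logger zookeeper = LoggerFactory.getLogger("org.apache.zookeeper");  // ERROR above
    System.out.println("hbase debug enabled:     " + hbase.isDebugEnabled());
    System.out.println("hadoop warn enabled:     " + hadoop.isWarnEnabled());
    System.out.println("zookeeper error enabled: " + zookeeper.isErrorEnabled());
  }
}
```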
2024-11-22T03:34:42,392 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-22T03:34:42,444 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=286, ProcessCount=11, AvailableMemoryMB=7641 2024-11-22T03:34:42,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:34:42,467 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8, deleteOnExit=true 2024-11-22T03:34:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:34:42,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/test.cache.data in system properties and HBase conf 2024-11-22T03:34:42,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:34:42,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:34:42,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:34:42,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:34:42,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:34:42,587 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-22T03:34:42,698 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T03:34:42,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:34:42,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:34:42,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:34:42,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:34:42,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:34:42,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:34:42,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:34:42,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:34:42,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:34:42,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:34:42,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:34:42,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:34:42,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:34:42,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:34:43,259 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:34:43,881 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T03:34:43,949 INFO [Time-limited test {}] log.Log(170): Logging initialized @3365ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-22T03:34:44,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:34:44,083 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:34:44,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:34:44,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:34:44,101 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:34:44,112 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:34:44,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73b23f80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:34:44,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d8eba1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:34:44,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@595f45d4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/java.io.tmpdir/jetty-localhost-37789-hadoop-hdfs-3_4_1-tests_jar-_-any-1436984476184371195/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:34:44,305 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1298d5a2{HTTP/1.1, (http/1.1)}{localhost:37789} 2024-11-22T03:34:44,305 INFO [Time-limited test {}] server.Server(415): Started @3723ms 2024-11-22T03:34:44,329 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:34:44,861 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:34:44,872 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:34:44,873 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:34:44,873 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:34:44,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:34:44,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198fe7a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:34:44,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1493401e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:34:44,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1442ffa6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/java.io.tmpdir/jetty-localhost-36741-hadoop-hdfs-3_4_1-tests_jar-_-any-16455998197007422721/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:34:44,983 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46cab4e7{HTTP/1.1, (http/1.1)}{localhost:36741} 2024-11-22T03:34:44,983 INFO [Time-limited test {}] server.Server(415): Started @4401ms 2024-11-22T03:34:45,035 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:34:45,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:34:45,153 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:34:45,159 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:34:45,159 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:34:45,160 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:34:45,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e06ea5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:34:45,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65b328c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:34:45,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e34e63e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/java.io.tmpdir/jetty-localhost-33575-hadoop-hdfs-3_4_1-tests_jar-_-any-3596547743821652494/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:34:45,268 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5300105e{HTTP/1.1, (http/1.1)}{localhost:33575} 2024-11-22T03:34:45,268 INFO [Time-limited test {}] server.Server(415): Started @4686ms 2024-11-22T03:34:45,271 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
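The startup sequence above (system properties pointing into the test-data directory, then a namenode and two datanode web servers) is driven by the HBaseTestingUtil call logged earlier with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}. A hedged sketch of the equivalent test code is below; the class name MiniClusterSketch is illustrative, and the builder method names are assumptions inferred from the option fields printed in the log.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Sketch of the kind of call that produces the startup sequence above.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged option
        .numRegionServers(1)  // numRegionServers=1
        .numDataNodes(2)      // numDataNodes=2 (two datanode web servers start above)
        .numZkServers(1)      // numZkServers=1
        .build();
    util.startMiniCluster(option);  // brings up HDFS, ZooKeeper, a master and a region server
    try {
      // test body would run against the mini cluster here
    } finally {
      util.shutdownMiniCluster();   // tear down and clean up the test-data directory
    }
  }
}
```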
2024-11-22T03:34:46,476 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data2/current/BP-1930816406-172.17.0.3-1732246483340/current, will proceed with Du for space computation calculation, 2024-11-22T03:34:46,476 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data1/current/BP-1930816406-172.17.0.3-1732246483340/current, will proceed with Du for space computation calculation, 2024-11-22T03:34:46,476 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data4/current/BP-1930816406-172.17.0.3-1732246483340/current, will proceed with Du for space computation calculation, 2024-11-22T03:34:46,476 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data3/current/BP-1930816406-172.17.0.3-1732246483340/current, will proceed with Du for space computation calculation, 2024-11-22T03:34:46,509 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:34:46,509 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:34:46,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e4ca123fb1c7c9b with lease ID 0xb9b6a756e0d25ae7: Processing first storage report for DS-5a87e29a-0a99-45d9-b381-dae53e8839fb from datanode DatanodeRegistration(127.0.0.1:37329, datanodeUuid=2ef4eabc-106a-4bc4-9c27-c5b88a04f260, infoPort=41755, infoSecurePort=0, ipcPort=35599, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340) 2024-11-22T03:34:46,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e4ca123fb1c7c9b with lease ID 0xb9b6a756e0d25ae7: from storage DS-5a87e29a-0a99-45d9-b381-dae53e8839fb node DatanodeRegistration(127.0.0.1:37329, datanodeUuid=2ef4eabc-106a-4bc4-9c27-c5b88a04f260, infoPort=41755, infoSecurePort=0, ipcPort=35599, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:34:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9783877afab9708c with lease ID 0xb9b6a756e0d25ae6: Processing first storage report for DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428 from datanode DatanodeRegistration(127.0.0.1:36143, datanodeUuid=2f766952-069e-4971-a304-82755b16f133, infoPort=43111, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340) 2024-11-22T03:34:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9783877afab9708c with lease ID 0xb9b6a756e0d25ae6: from storage DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428 node DatanodeRegistration(127.0.0.1:36143, datanodeUuid=2f766952-069e-4971-a304-82755b16f133, infoPort=43111, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:34:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e4ca123fb1c7c9b with lease ID 0xb9b6a756e0d25ae7: Processing first storage report for DS-0fb38855-a986-4578-aeb8-ad4f17a008e2 from datanode DatanodeRegistration(127.0.0.1:37329, datanodeUuid=2ef4eabc-106a-4bc4-9c27-c5b88a04f260, infoPort=41755, infoSecurePort=0, ipcPort=35599, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340) 2024-11-22T03:34:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e4ca123fb1c7c9b with lease ID 0xb9b6a756e0d25ae7: from storage DS-0fb38855-a986-4578-aeb8-ad4f17a008e2 node DatanodeRegistration(127.0.0.1:37329, datanodeUuid=2ef4eabc-106a-4bc4-9c27-c5b88a04f260, infoPort=41755, infoSecurePort=0, ipcPort=35599, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:34:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9783877afab9708c with lease ID 0xb9b6a756e0d25ae6: Processing first storage report for DS-a90f0298-6ffc-4f70-9907-911eed3a43d4 from datanode DatanodeRegistration(127.0.0.1:36143, datanodeUuid=2f766952-069e-4971-a304-82755b16f133, infoPort=43111, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340) 2024-11-22T03:34:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x9783877afab9708c with lease ID 0xb9b6a756e0d25ae6: from storage DS-a90f0298-6ffc-4f70-9907-911eed3a43d4 node DatanodeRegistration(127.0.0.1:36143, datanodeUuid=2f766952-069e-4971-a304-82755b16f133, infoPort=43111, infoSecurePort=0, ipcPort=43509, storageInfo=lv=-57;cid=testClusterID;nsid=428849210;c=1732246483340), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:34:46,591 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8 2024-11-22T03:34:46,651 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/zookeeper_0, clientPort=50067, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:34:46,659 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50067 2024-11-22T03:34:46,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:46,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:46,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:34:46,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:34:47,282 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4 with version=8 2024-11-22T03:34:47,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:34:47,366 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-22T03:34:47,648 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:34:47,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:34:47,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:34:47,663 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:34:47,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:34:47,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:34:47,839 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:34:47,895 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-22T03:34:47,907 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-22T03:34:47,911 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:34:47,933 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 97912 (auto-detected) 2024-11-22T03:34:47,934 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-22T03:34:47,951 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36935 2024-11-22T03:34:47,971 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36935 connecting to ZooKeeper ensemble=127.0.0.1:50067 2024-11-22T03:34:48,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:369350x0, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:34:48,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36935-0x101609bba170000 connected 2024-11-22T03:34:48,189 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:48,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:48,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:34:48,209 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4, hbase.cluster.distributed=false 2024-11-22T03:34:48,233 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:34:48,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36935 2024-11-22T03:34:48,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36935 2024-11-22T03:34:48,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36935 2024-11-22T03:34:48,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36935 2024-11-22T03:34:48,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36935 2024-11-22T03:34:48,339 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:34:48,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:34:48,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:34:48,342 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:34:48,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:34:48,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:34:48,345 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:34:48,350 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:34:48,351 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44135 2024-11-22T03:34:48,355 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44135 connecting to ZooKeeper ensemble=127.0.0.1:50067 2024-11-22T03:34:48,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:48,364 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:48,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441350x0, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:34:48,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:441350x0, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:34:48,404 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44135-0x101609bba170001 connected 2024-11-22T03:34:48,407 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:34:48,416 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:34:48,418 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:34:48,425 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:34:48,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44135 2024-11-22T03:34:48,428 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44135 2024-11-22T03:34:48,432 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44135 2024-11-22T03:34:48,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44135 2024-11-22T03:34:48,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44135 2024-11-22T03:34:48,458 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:36935 2024-11-22T03:34:48,460 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,36935,1732246487457 2024-11-22T03:34:48,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:34:48,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:34:48,478 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b458937b0f5f,36935,1732246487457 2024-11-22T03:34:48,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:34:48,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:48,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
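The region-server bootstrap above reports "Allocating BlockCache size=880 MB, blockSize=64 KB" and "MobFileCache enabled with cacheSize=1000". As a hedged sketch (illustrative class name CacheConfigSketch; the values shown are assumptions matching the logged defaults rather than anything this test sets explicitly), these sizes are governed by standard HBase configuration keys:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the configuration knobs behind the cache sizes reported above.
public class CacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the JVM heap given to the on-heap block cache; by default this
    // fraction of the test JVM's heap is what shows up as "BlockCache size=880 MB".
    conf.setFloat("hfile.block.cache.size", 0.4f);
    // Number of open MOB files kept cached ("MobFileCache enabled with cacheSize=1000").
    conf.setInt("hbase.mob.file.cache.size", 1000);
    System.out.println("block cache fraction = " + conf.getFloat("hfile.block.cache.size", 0.4f));
    System.out.println("mob file cache size  = " + conf.getInt("hbase.mob.file.cache.size", 1000));
  }
}
```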
2024-11-22T03:34:48,508 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:34:48,509 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,36935,1732246487457 from backup master directory 2024-11-22T03:34:48,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:34:48,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b458937b0f5f,36935,1732246487457 2024-11-22T03:34:48,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:34:48,524 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:34:48,524 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,36935,1732246487457 2024-11-22T03:34:48,526 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-22T03:34:48,528 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-22T03:34:48,595 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase.id] with ID: aefe6c33-f1ce-47b0-9359-03c20661ccaa 2024-11-22T03:34:48,595 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/.tmp/hbase.id 2024-11-22T03:34:48,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:34:48,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:34:48,623 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/.tmp/hbase.id]:[hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase.id] 2024-11-22T03:34:48,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:48,687 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:34:48,706 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-22T03:34:48,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:48,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:48,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:34:48,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:34:48,757 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:34:48,759 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:34:48,767 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:34:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:34:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:34:49,222 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store 2024-11-22T03:34:49,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:34:49,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:34:49,255 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-22T03:34:49,258 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:34:49,259 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:34:49,260 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:34:49,260 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:34:49,262 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:34:49,262 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:34:49,262 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
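The master:store descriptor dumped above lists one family per attribute block (info, proc, rs, state). To make the logged attributes easier to read, here is a hedged sketch that rebuilds just the 'info' family with the public builder API; the class name MasterStoreDescriptorSketch is illustrative, the values mirror the log, and this is not how the master itself constructs its local store.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch mapping the logged 'info' family attributes onto builder calls.
public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor masterStore = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build())
        .build();
    System.out.println(masterStore);
  }
}
```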
2024-11-22T03:34:49,263 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246489259Disabling compacts and flushes for region at 1732246489259Disabling writes for close at 1732246489262 (+3 ms)Writing region close event to WAL at 1732246489262Closed at 1732246489262 2024-11-22T03:34:49,266 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/.initializing 2024-11-22T03:34:49,266 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/WALs/b458937b0f5f,36935,1732246487457 2024-11-22T03:34:49,290 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C36935%2C1732246487457, suffix=, logDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/WALs/b458937b0f5f,36935,1732246487457, archiveDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/oldWALs, maxLogs=10 2024-11-22T03:34:49,302 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36935%2C1732246487457.1732246489296 2024-11-22T03:34:49,328 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/WALs/b458937b0f5f,36935,1732246487457/b458937b0f5f%2C36935%2C1732246487457.1732246489296 2024-11-22T03:34:49,342 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:43111:43111)] 2024-11-22T03:34:49,349 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:34:49,349 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:34:49,354 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,355 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:34:49,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:49,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:49,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:34:49,436 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:49,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:34:49,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:34:49,441 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:49,442 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:34:49,443 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,446 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:34:49,447 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:49,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:34:49,448 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,453 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,455 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,462 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,463 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,468 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:34:49,473 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:34:49,479 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:34:49,480 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692773, jitterRate=-0.11909469962120056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:34:49,487 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246489370Initializing all the Stores at 1732246489373 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246489374 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246489375 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246489376 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246489376Cleaning up temporary data from old regions at 1732246489463 (+87 ms)Region opened successfully at 1732246489487 (+24 ms) 2024-11-22T03:34:49,489 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:34:49,525 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57ab7774, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:34:49,564 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:34:49,577 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:34:49,578 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:34:49,582 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:34:49,584 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T03:34:49,589 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-22T03:34:49,590 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:34:49,622 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:34:49,635 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:34:49,718 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:34:49,720 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:34:49,722 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:34:49,733 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:34:49,736 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:34:49,742 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:34:49,754 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:34:49,756 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:34:49,765 
DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:34:49,781 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:34:49,791 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:34:49,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:34:49,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:34:49,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:49,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:49,810 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,36935,1732246487457, sessionid=0x101609bba170000, setting cluster-up flag (Was=false) 2024-11-22T03:34:49,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:49,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:49,876 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:34:49,878 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,36935,1732246487457 2024-11-22T03:34:49,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:49,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:49,933 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:34:49,936 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,36935,1732246487457 2024-11-22T03:34:49,944 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:34:50,041 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:34:50,042 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(746): ClusterId : aefe6c33-f1ce-47b0-9359-03c20661ccaa 2024-11-22T03:34:50,044 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:34:50,051 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:34:50,057 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:34:50,057 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:34:50,058 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
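The repeated DEBUG lines of the form "Unable to get data of znode ... because node does not exist (not necessarily an error)" are ordinary ZooKeeper reads that come back empty. A minimal sketch of such a probe with the stock ZooKeeper client, assuming the quorum 127.0.0.1:50067 and base znode /hbase shown in the log; the 30000 ms session timeout and the error handling are illustrative only.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode taken from the log; the session timeout is arbitrary.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50067", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Connection and NodeChildrenChanged events, like the ZKWatcher lines above.
      }
    });
    try {
      Stat stat = zk.exists("/hbase/balancer", false);
      if (stat == null) {
        // The situation the master logs as "node does not exist
        // (not necessarily an error)": the switch simply has no stored state yet.
        System.out.println("/hbase/balancer not present");
      } else {
        byte[] data = zk.getData("/hbase/balancer", false, stat);
        System.out.println("data length = " + data.length);
      }
    } catch (KeeperException | InterruptedException e) {
      e.printStackTrace();
    } finally {
      zk.close();
    }
  }
}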
2024-11-22T03:34:50,063 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,36935,1732246487457 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:34:50,071 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:34:50,072 DEBUG [RS:0;b458937b0f5f:44135 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d2f020b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:34:50,072 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:34:50,073 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,077 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246520077 2024-11-22T03:34:50,078 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:34:50,078 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:34:50,079 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:34:50,080 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 
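The executor-service lines above report pools with corePoolSize equal to maxPoolSize. The following is a plain JDK sketch of what that sizing means, not HBase's own ExecutorService implementation: with an unbounded work queue, such a pool stays fixed at its core size.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedPoolSketch {
  public static void main(String[] args) {
    // corePoolSize=5, maxPoolSize=5, as logged for MASTER_OPEN_REGION:
    // with an unbounded queue the pool never grows past the core size.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    // Comparable to the "allowCoreThreadTimeOut=true" noted for the
    // RemoteProcedureDispatcher earlier in the log.
    pool.allowCoreThreadTimeOut(true);
    pool.execute(() -> System.out.println("task ran on " + Thread.currentThread().getName()));
    pool.shutdown();
  }
}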
2024-11-22T03:34:50,084 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,084 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:34:50,084 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:34:50,085 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:34:50,085 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:34:50,085 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:34:50,086 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
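The DefaultStoreFileTracker instantiations above follow from the 'hbase.store.file-tracker.impl' => 'DEFAULT' attribute embedded in the descriptors. A minimal sketch of pinning that choice explicitly in the site configuration, using only the key and value that appear in the log; whether you set it cluster-wide or per table is a deployment decision outside this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key and value are the ones printed in the descriptor METADATA above;
    // 'DEFAULT' keeps the DefaultStoreFileTracker the log shows being instantiated.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println(conf.get("hbase.store.file-tracker.impl"));
  }
}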
2024-11-22T03:34:50,089 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:44135 2024-11-22T03:34:50,091 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:34:50,092 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:34:50,093 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:34:50,093 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:34:50,094 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:34:50,094 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T03:34:50,095 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:34:50,095 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:34:50,096 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,36935,1732246487457 with port=44135, startcode=1732246488301 2024-11-22T03:34:50,097 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246490097,5,FailOnTimeoutGroup] 2024-11-22T03:34:50,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:34:50,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:34:50,100 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246490097,5,FailOnTimeoutGroup] 2024-11-22T03:34:50,100 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:34:50,100 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,101 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
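Two of the messages above name their own enabling keys: "Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it" and, earlier, "Set hbase.region.store.parallel.put.limit > 0 to enable" for the StoreHotnessProtector. A minimal sketch of turning both on; the numeric thresholds 3 and 10 are purely illustrative, not values taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableThresholdsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key named in the HMaster line above; the threshold 3 is illustrative.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    // Key named in the earlier StoreHotnessProtector line; 10 is likewise illustrative.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}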
2024-11-22T03:34:50,101 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4 2024-11-22T03:34:50,102 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,102 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
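The entry above shows the hbase:meta region (encoded name 1588230740) being created with the info/ns/rep_barrier/table families and the MultiRowMutationEndpoint coprocessor. A minimal client-side sketch of reading that descriptor back once the cluster is up, assuming an hbase-site.xml with the cluster's ZooKeeper quorum is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // Expects hbase-site.xml with the ZooKeeper quorum on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Reads back the hbase:meta descriptor whose creation is logged above
      // (info/ns/rep_barrier/table families, MultiRowMutationEndpoint coprocessor).
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      System.out.println(meta);
    }
  }
}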
2024-11-22T03:34:50,108 DEBUG [RS:0;b458937b0f5f:44135 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:34:50,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:34:50,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:34:50,114 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:34:50,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:34:50,119 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:34:50,119 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:34:50,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:34:50,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:34:50,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:34:50,129 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:34:50,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:34:50,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-11-22T03:34:50,136 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740 2024-11-22T03:34:50,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740 2024-11-22T03:34:50,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:34:50,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:34:50,140 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:34:50,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:34:50,147 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:34:50,148 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713018, jitterRate=-0.09335179626941681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:34:50,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246490114Initializing all the Stores at 1732246490116 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246490116Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246490116Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246490116Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246490116Cleaning up temporary data from old regions at 1732246490139 (+23 ms)Region opened successfully at 1732246490153 (+14 ms) 2024-11-22T03:34:50,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 
1588230740, disabling compactions & flushes 2024-11-22T03:34:50,153 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:34:50,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:34:50,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:34:50,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:34:50,155 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:34:50,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246490153Disabling compacts and flushes for region at 1732246490153Disabling writes for close at 1732246490154 (+1 ms)Writing region close event to WAL at 1732246490154Closed at 1732246490155 (+1 ms) 2024-11-22T03:34:50,158 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:34:50,158 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:34:50,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:34:50,171 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:34:50,172 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60995, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:34:50,174 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:34:50,178 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36935 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,180 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36935 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,195 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4 2024-11-22T03:34:50,195 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35981 2024-11-22T03:34:50,195 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:34:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:34:50,207 DEBUG 
[RS:0;b458937b0f5f:44135 {}] zookeeper.ZKUtil(111): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,208 WARN [RS:0;b458937b0f5f:44135 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:34:50,208 INFO [RS:0;b458937b0f5f:44135 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:34:50,208 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,210 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,44135,1732246488301] 2024-11-22T03:34:50,241 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:34:50,261 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:34:50,267 INFO [RS:0;b458937b0f5f:44135 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:34:50,268 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,269 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:34:50,276 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:34:50,278 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
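The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. If that budget needed adjusting, the usual knob is the global memstore heap fraction; a minimal sketch assuming the standard hbase.regionserver.global.memstore.size key, which does not itself appear in this log, so treat the key and the 0.4 value as assumptions for your version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBudgetSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key (not printed in the log): fraction of the region server heap
    // reserved for memstores; 0.4 is the customary default.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
  }
}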
2024-11-22T03:34:50,279 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,279 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,279 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,280 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,280 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,280 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:34:50,281 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,281 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,281 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,281 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,281 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,282 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:34:50,282 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:34:50,282 DEBUG [RS:0;b458937b0f5f:44135 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:34:50,284 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,284 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,284 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,285 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
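The ScheduledChore lines above (CompactionChecker and MemstoreFlusherChore at period=1000 ms, ExecutorStatusChore at 60000 ms, and so on) describe fixed-rate periodic tasks. HBase runs these through its own ChoreService; the following is only a JDK analogue of the period/unit semantics, with the sleep duration chosen arbitrarily for the demo.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChorePeriodSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    // period=1000, unit=MILLISECONDS, as logged for CompactionChecker/MemstoreFlusherChore.
    scheduler.scheduleAtFixedRate(
        () -> System.out.println("chore tick"), 0, 1000, TimeUnit.MILLISECONDS);
    Thread.sleep(3500); // let a few ticks run, then stop
    scheduler.shutdown();
  }
}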
2024-11-22T03:34:50,285 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,285 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,44135,1732246488301-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:34:50,303 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:34:50,305 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,44135,1732246488301-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,306 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,306 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.Replication(171): b458937b0f5f,44135,1732246488301 started 2024-11-22T03:34:50,324 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:50,324 WARN [b458937b0f5f:36935 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:34:50,325 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,44135,1732246488301, RpcServer on b458937b0f5f/172.17.0.3:44135, sessionid=0x101609bba170001 2024-11-22T03:34:50,325 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:34:50,326 DEBUG [RS:0;b458937b0f5f:44135 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,326 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,44135,1732246488301' 2024-11-22T03:34:50,326 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:34:50,327 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:34:50,328 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:34:50,328 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:34:50,328 DEBUG [RS:0;b458937b0f5f:44135 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,328 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,44135,1732246488301' 2024-11-22T03:34:50,328 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:34:50,329 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:34:50,330 DEBUG [RS:0;b458937b0f5f:44135 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:34:50,330 INFO [RS:0;b458937b0f5f:44135 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:34:50,330 INFO [RS:0;b458937b0f5f:44135 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:34:50,445 INFO [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C44135%2C1732246488301, suffix=, logDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301, archiveDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs, maxLogs=32 2024-11-22T03:34:50,447 INFO [RS:0;b458937b0f5f:44135 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246490447 2024-11-22T03:34:50,458 INFO [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246490447 2024-11-22T03:34:50,460 DEBUG [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:34:50,577 DEBUG [b458937b0f5f:36935 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:34:50,587 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,593 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,44135,1732246488301, state=OPENING 2024-11-22T03:34:50,606 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:34:50,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:50,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:34:50,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:34:50,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:34:50,621 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:34:50,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,44135,1732246488301}] 2024-11-22T03:34:50,806 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:34:50,810 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.3:40919, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:34:50,823 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:34:50,823 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:34:50,828 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C44135%2C1732246488301.meta, suffix=.meta, logDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301, archiveDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs, maxLogs=32 2024-11-22T03:34:50,831 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.meta.1732246490831.meta 2024-11-22T03:34:50,842 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.meta.1732246490831.meta 2024-11-22T03:34:50,865 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:34:50,866 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:34:50,868 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:34:50,870 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:34:50,875 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
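The "Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint ... priority 536870911" entries above reflect a coprocessor that ships pre-attached to the hbase:meta table descriptor. For an ordinary user table the same endpoint would be attached explicitly on the descriptor; a minimal sketch under that assumption (the table name is a placeholder, not taken from this test):

```java
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static void main(String[] args) throws IOException {
    // Attach the same endpoint the log shows for hbase:meta to a hypothetical table.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))   // placeholder table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();

    // The descriptor now carries the coprocessor entry; when a region of this table
    // opens, the region server loads it and logs "Loading coprocessor class ..."
    // much like the entries above.
    td.getCoprocessorDescriptors().forEach(cp -> System.out.println(cp.getClassName()));
  }
}
```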
2024-11-22T03:34:50,880 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:34:50,881 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:34:50,881 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:34:50,881 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:34:50,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:34:50,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:34:50,887 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:34:50,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:34:50,891 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:34:50,895 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:34:50,895 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:34:50,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:34:50,899 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:34:50,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:50,900 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
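Each store opened above prints its effective CompactionConfiguration. Those figures correspond to standard HBase configuration keys; the sketch below shows the mapping for the values that appear in these entries, assuming stock key names (the values simply echo the defaults printed in the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Values echoed by the CompactionConfiguration(183) lines above.
    conf.setLong("hbase.hstore.compaction.min.size", 134217728L);  // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period: 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter

    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}
```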
2024-11-22T03:34:50,900 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:34:50,902 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740 2024-11-22T03:34:50,905 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740 2024-11-22T03:34:50,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:34:50,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:34:50,909 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:34:50,912 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:34:50,914 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709781, jitterRate=-0.09746719896793365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:34:50,914 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:34:50,916 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246490882Writing region info on filesystem at 1732246490882Initializing all the Stores at 1732246490884 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246490884Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246490884Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246490884Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246490884Cleaning up temporary data from old regions at 1732246490908 (+24 ms)Running coprocessor post-open hooks at 1732246490914 (+6 ms)Region opened successfully at 1732246490915 (+1 ms) 2024-11-22T03:34:50,923 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246490798 2024-11-22T03:34:50,934 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:34:50,935 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:34:50,936 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,44135,1732246488301 2024-11-22T03:34:50,939 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,44135,1732246488301, state=OPEN 2024-11-22T03:34:51,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:34:51,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:34:51,039 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:34:51,039 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:34:51,040 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,44135,1732246488301 2024-11-22T03:34:51,047 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:34:51,047 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,44135,1732246488301 in 417 msec 2024-11-22T03:34:51,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:34:51,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 886 msec 2024-11-22T03:34:51,058 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:34:51,058 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:34:51,078 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:34:51,079 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,44135,1732246488301, seqNum=-1] 2024-11-22T03:34:51,100 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:34:51,102 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53751, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:34:51,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1380 sec 2024-11-22T03:34:51,124 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246491124, completionTime=-1 2024-11-22T03:34:51,126 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:34:51,127 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:34:51,154 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:34:51,154 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246551154 2024-11-22T03:34:51,155 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246611155 2024-11-22T03:34:51,155 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-11-22T03:34:51,157 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36935,1732246487457-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:51,158 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36935,1732246487457-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:51,158 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36935,1732246487457-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:51,159 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:36935, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:34:51,160 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:51,160 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:34:51,166 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:34:51,199 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.675sec 2024-11-22T03:34:51,200 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:34:51,202 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:34:51,204 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:34:51,204 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:34:51,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:34:51,206 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36935,1732246487457-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:34:51,206 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36935,1732246487457-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:34:51,217 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:34:51,218 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:34:51,218 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36935,1732246487457-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
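With the master fully initialized, the entries that follow ("Codec=...KeyValueCodec", "Going to request b458937b0f5f,36935,-1 for getting cluster id", "Connection from 172.17.0.3:...") show the test client opening its own connection to the cluster. The test obtains that connection from its testing utility, and this 3.0.0-beta build reaches the cluster through a master-based connection registry, so the exact bootstrap keys differ; the sketch below is a plain client using the classic ZooKeeper settings, with the quorum copied from the ZKWatcher entries above:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper quorum of the minicluster, as printed by the ZKWatcher entries above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "50067");

    // Open a client connection and ask the cluster for basic metadata.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```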
2024-11-22T03:34:51,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e50f9c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:34:51,258 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T03:34:51,258 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T03:34:51,263 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,36935,-1 for getting cluster id 2024-11-22T03:34:51,267 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:34:51,278 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'aefe6c33-f1ce-47b0-9359-03c20661ccaa' 2024-11-22T03:34:51,283 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:34:51,283 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "aefe6c33-f1ce-47b0-9359-03c20661ccaa" 2024-11-22T03:34:51,286 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b05e32a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:34:51,286 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,36935,-1] 2024-11-22T03:34:51,290 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:34:51,295 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:34:51,300 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46004, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:34:51,304 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11d4963a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:34:51,305 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:34:51,317 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,44135,1732246488301, seqNum=-1] 2024-11-22T03:34:51,318 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:34:51,322 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44948, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:34:51,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=b458937b0f5f,36935,1732246487457 2024-11-22T03:34:51,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:34:51,367 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:34:51,373 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:34:51,381 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b458937b0f5f,36935,1732246487457 2024-11-22T03:34:51,384 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@28272c1c 2024-11-22T03:34:51,385 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:34:51,389 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46014, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:34:51,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:34:51,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
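The two TableDescriptorChecker warnings above fire because the test deliberately creates its table with a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes, splits, and log rolls happen quickly. A client-side sketch of a descriptor that would trigger the same warnings and the same CreateTableProcedure seen below (connection setup is assumed; the class name is illustrative, not the test's own helper):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallRegionTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)          // VERSIONS => '1' in the descriptor logged below
              .build())
          .setMaxFileSize(786432L)        // triggers the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192L)    // triggers the MEMSTORE_FLUSHSIZE warning above
          .build();
      admin.createTable(td);              // master runs the CreateTableProcedure (pid=4 below)
    }
  }
}
```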
2024-11-22T03:34:51,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:34:51,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:34:51,413 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:34:51,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-22T03:34:51,416 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:51,427 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:34:51,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:34:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741835_1011 (size=389) 2024-11-22T03:34:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741835_1011 (size=389) 2024-11-22T03:34:51,527 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 65fff0f10b766103f11a53a6ec2c07df, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4 2024-11-22T03:34:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741836_1012 (size=72) 2024-11-22T03:34:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741836_1012 (size=72) 2024-11-22T03:34:51,556 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:34:51,556 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 65fff0f10b766103f11a53a6ec2c07df, disabling compactions & flushes 2024-11-22T03:34:51,556 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:34:51,556 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:34:51,556 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. after waiting 0 ms 2024-11-22T03:34:51,557 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:34:51,557 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:34:51,557 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 65fff0f10b766103f11a53a6ec2c07df: Waiting for close lock at 1732246491556Disabling compacts and flushes for region at 1732246491556Disabling writes for close at 1732246491556Writing region close event to WAL at 1732246491557 (+1 ms)Closed at 1732246491557 2024-11-22T03:34:51,559 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:34:51,566 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732246491560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246491560"}]},"ts":"1732246491560"} 2024-11-22T03:34:51,571 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T03:34:51,574 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:34:51,577 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246491574"}]},"ts":"1732246491574"} 2024-11-22T03:34:51,584 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-22T03:34:51,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=65fff0f10b766103f11a53a6ec2c07df, ASSIGN}] 2024-11-22T03:34:51,590 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=65fff0f10b766103f11a53a6ec2c07df, ASSIGN 2024-11-22T03:34:51,592 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=65fff0f10b766103f11a53a6ec2c07df, ASSIGN; state=OFFLINE, location=b458937b0f5f,44135,1732246488301; forceNewPlan=false, retain=false 2024-11-22T03:34:51,744 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=65fff0f10b766103f11a53a6ec2c07df, regionState=OPENING, regionLocation=b458937b0f5f,44135,1732246488301 2024-11-22T03:34:51,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=65fff0f10b766103f11a53a6ec2c07df, ASSIGN because future has completed 2024-11-22T03:34:51,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65fff0f10b766103f11a53a6ec2c07df, server=b458937b0f5f,44135,1732246488301}] 2024-11-22T03:34:51,911 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 
2024-11-22T03:34:51,912 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 65fff0f10b766103f11a53a6ec2c07df, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:34:51,912 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,912 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:34:51,912 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,912 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,915 INFO [StoreOpener-65fff0f10b766103f11a53a6ec2c07df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,917 INFO [StoreOpener-65fff0f10b766103f11a53a6ec2c07df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 65fff0f10b766103f11a53a6ec2c07df columnFamilyName info 2024-11-22T03:34:51,917 DEBUG [StoreOpener-65fff0f10b766103f11a53a6ec2c07df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:34:51,918 INFO [StoreOpener-65fff0f10b766103f11a53a6ec2c07df-1 {}] regionserver.HStore(327): Store=65fff0f10b766103f11a53a6ec2c07df/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:34:51,919 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,920 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,921 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,922 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,922 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,925 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,928 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:34:51,929 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 65fff0f10b766103f11a53a6ec2c07df; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744082, jitterRate=-0.053852006793022156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:34:51,929 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:34:51,930 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 65fff0f10b766103f11a53a6ec2c07df: Running coprocessor pre-open hook at 1732246491913Writing region info on filesystem at 1732246491913Initializing all the Stores at 1732246491914 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246491914Cleaning up temporary data from old regions at 1732246491922 (+8 ms)Running coprocessor post-open hooks at 1732246491929 (+7 ms)Region opened successfully at 1732246491930 (+1 ms) 2024-11-22T03:34:51,932 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df., pid=6, masterSystemTime=1732246491905 2024-11-22T03:34:51,936 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:34:51,936 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:34:51,937 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=65fff0f10b766103f11a53a6ec2c07df, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,44135,1732246488301 2024-11-22T03:34:51,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65fff0f10b766103f11a53a6ec2c07df, server=b458937b0f5f,44135,1732246488301 because future has completed 2024-11-22T03:34:51,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:34:51,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 65fff0f10b766103f11a53a6ec2c07df, server=b458937b0f5f,44135,1732246488301 in 194 msec 2024-11-22T03:34:51,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:34:51,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=65fff0f10b766103f11a53a6ec2c07df, ASSIGN in 363 msec 2024-11-22T03:34:51,955 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:34:51,956 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246491955"}]},"ts":"1732246491955"} 2024-11-22T03:34:51,960 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-22T03:34:51,962 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:34:51,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 561 msec 2024-11-22T03:34:56,398 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T03:34:56,448 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:34:56,449 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-22T03:34:57,893 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:34:57,893 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:34:57,896 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:34:57,896 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:34:57,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:34:57,899 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:34:57,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:34:57,899 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T03:35:01,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:35:01,515 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-22T03:35:01,518 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-22T03:35:01,525 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:35:01,526 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 
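The ClientMetaTableAccessor scan above ("Scanning META starting at row=... with caching=100", "Found 1 regions for table") is how the test helper counts regions for the new table. The equivalent public client call looks roughly like the sketch below, assuming an open connection; for this run it would report the single region whose name appears in the firstRegionName entry above.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Reads hbase:meta, much like the "Scanning META starting at row=..." entry above.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("regions for " + tn + ": " + locations.size());
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}
```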
2024-11-22T03:35:01,527 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246501527 2024-11-22T03:35:01,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:01,537 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:01,537 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:01,538 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:01,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:01,538 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246490447 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246501527 2024-11-22T03:35:01,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:43111:43111)] 2024-11-22T03:35:01,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246490447 is not closed yet, will try archiving it next time 2024-11-22T03:35:01,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741833_1009 (size=451) 2024-11-22T03:35:01,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741833_1009 (size=451) 2024-11-22T03:35:01,551 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df., hostname=b458937b0f5f,44135,1732246488301, seqNum=2] 2024-11-22T03:35:01,945 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246490447 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246490447 2024-11-22T03:35:13,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44135 {}] regionserver.HRegion(8855): Flush requested on 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:35:13,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 65fff0f10b766103f11a53a6ec2c07df 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:35:13,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/dfbe197ab3d946e6ae91923f98de60d8 is 1080, key is row0001/info:/1732246501554/Put/seqid=0 2024-11-22T03:35:13,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741838_1014 (size=12509) 2024-11-22T03:35:13,722 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741838_1014 (size=12509) 2024-11-22T03:35:13,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/dfbe197ab3d946e6ae91923f98de60d8 2024-11-22T03:35:13,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/dfbe197ab3d946e6ae91923f98de60d8 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8 2024-11-22T03:35:13,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T03:35:13,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 65fff0f10b766103f11a53a6ec2c07df in 198ms, sequenceid=11, compaction requested=false 2024-11-22T03:35:13,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 65fff0f10b766103f11a53a6ec2c07df: 2024-11-22T03:35:16,588 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
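The flush bookkeeping above is internally consistent: 7,532 bytes across 7 cells is about 1,076 bytes per cell, in line with the 1,080-byte "biggest cell" reported by HFileWriterImpl (a roughly 1 KB value plus key overhead). A minimal check of that arithmetic, using only figures taken from the log:

// Sanity check of the flush accounting logged above; all numbers are from the log.
public class FlushAccounting {
  public static void main(String[] args) {
    int entries = 7;      // "entries=7" in the HStore line
    int dataSize = 7532;  // "dataSize ~7.36 KB/7532"
    System.out.printf("average cell size = %d bytes%n", dataSize / entries); // 1076
  }
}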
2024-11-22T03:35:21,626 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246521626 2024-11-22T03:35:21,843 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:21,843 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:21,843 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:21,843 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:21,843 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:21,844 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:21,844 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246501527 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246521626 2024-11-22T03:35:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741837_1013 (size=12399) 2024-11-22T03:35:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741837_1013 (size=12399) 2024-11-22T03:35:21,858 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:35:22,061 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:24,265 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:26,470 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:28,674 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:28,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44135 {}] 
regionserver.HRegion(8855): Flush requested on 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:35:28,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 65fff0f10b766103f11a53a6ec2c07df 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:35:28,876 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:28,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/4ab9b7e3bdbc4af4850978141c1d5b70 is 1080, key is row0008/info:/1732246515615/Put/seqid=0 2024-11-22T03:35:28,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741840_1016 (size=12509) 2024-11-22T03:35:28,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741840_1016 (size=12509) 2024-11-22T03:35:28,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/4ab9b7e3bdbc4af4850978141c1d5b70 2024-11-22T03:35:28,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/4ab9b7e3bdbc4af4850978141c1d5b70 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/4ab9b7e3bdbc4af4850978141c1d5b70 2024-11-22T03:35:28,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/4ab9b7e3bdbc4af4850978141c1d5b70, entries=7, sequenceid=21, filesize=12.2 K 2024-11-22T03:35:29,125 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:29,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 65fff0f10b766103f11a53a6ec2c07df in 452ms, sequenceid=21, compaction requested=false 2024-11-22T03:35:29,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 65fff0f10b766103f11a53a6ec2c07df: 2024-11-22T03:35:29,126 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-22T03:35:29,127 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:35:29,127 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8 because midkey is the same as first or last row 2024-11-22T03:35:30,878 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:31,267 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T03:35:31,268 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T03:35:33,110 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 228 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:33,115 WARN [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:33,117 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C44135%2C1732246488301:(num 1732246521626) roll requested 2024-11-22T03:35:33,118 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246533117 2024-11-22T03:35:33,328 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:33,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:33,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:33,328 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:33,328 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:33,328 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:33,329 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246521626 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246533117 2024-11-22T03:35:33,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741839_1015 (size=7739) 2024-11-22T03:35:33,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741839_1015 (size=7739) 2024-11-22T03:35:33,332 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:43111:43111)] 2024-11-22T03:35:33,332 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246521626 is not closed yet, will try archiving it next time 2024-11-22T03:35:33,333 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246501527 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246501527 2024-11-22T03:35:35,315 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:36,913 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 65fff0f10b766103f11a53a6ec2c07df, had cached 0 bytes from a total of 25018 2024-11-22T03:35:37,519 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:39,727 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:41,935 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:43,937 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:35:43,937 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246543937 
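The WAL lines above and below show the two roll triggers at work: a burst of syncs slower than the slow-sync cutoff (the earlier "count=8, threshold=5" warning) and a single sync exceeding the hard limit (the 5013 ms sync that follows, against "threshold=5000 ms"). The sketch below is a simplified stand-in for that decision logic, not HBase's AbstractFSWAL implementation; the 100 ms per-sync cutoff and the 60 s window are assumptions, while the count and time thresholds are the ones printed in the log.

// Simplified illustration of the two log-roll triggers visible in the WAL lines
// around this point; this is NOT HBase's AbstractFSWAL code. The count threshold (5)
// and time threshold (5000 ms) appear in the log; the per-sync "slow" cutoff and the
// window used here are assumptions.
import java.util.ArrayDeque;
import java.util.Deque;

public class SlowSyncRollCheck {
  private static final long SLOW_SYNC_MS = 100;       // assumed per-sync cutoff
  private static final long ROLL_ON_SYNC_MS = 5_000;  // "threshold=5000 ms"
  private static final int SLOW_SYNC_ROLL_COUNT = 5;  // "threshold=5"
  private static final long WINDOW_MS = 60_000;       // assumed rolling window

  private final Deque<Long> slowSyncTimestamps = new ArrayDeque<>();

  /** Returns true if this sync should trigger a WAL roll. */
  public boolean recordSync(long nowMs, long syncCostMs) {
    if (syncCostMs >= ROLL_ON_SYNC_MS) {
      return true; // a single very slow sync, like the 5013 ms sync that follows
    }
    if (syncCostMs >= SLOW_SYNC_MS) {
      slowSyncTimestamps.addLast(nowMs);
    }
    // Drop slow syncs that fell out of the window, then apply the count trigger,
    // as in the earlier "count=8, threshold=5" roll request.
    while (!slowSyncTimestamps.isEmpty() && nowMs - slowSyncTimestamps.peekFirst() > WINDOW_MS) {
      slowSyncTimestamps.removeFirst();
    }
    return slowSyncTimestamps.size() > SLOW_SYNC_ROLL_COUNT;
  }

  public static void main(String[] args) {
    SlowSyncRollCheck check = new SlowSyncRollCheck();
    long t = 0;
    for (int i = 1; i <= 8; i++) {
      boolean roll = check.recordSync(t += 2_000, 201); // eight ~201 ms syncs
      System.out.println("sync " + i + " -> roll requested: " + roll);
    }
  }
}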
2024-11-22T03:35:46,589 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:35:48,955 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:48,959 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK], DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK]] 2024-11-22T03:35:48,959 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C44135%2C1732246488301:(num 1732246543937) roll requested 2024-11-22T03:35:48,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:48,960 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:48,960 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:48,960 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:48,960 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:48,961 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246533117 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246543937 2024-11-22T03:35:48,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741841_1017 (size=4753) 2024-11-22T03:35:48,964 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:35:48,965 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246533117 is not closed yet, will try archiving it next time 2024-11-22T03:35:48,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741841_1017 (size=4753) 2024-11-22T03:35:48,965 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246548965 2024-11-22T03:35:53,968 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:53,969 WARN [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:53,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44135 {}] regionserver.HRegion(8855): Flush requested on 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:35:53,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 65fff0f10b766103f11a53a6ec2c07df 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:35:53,980 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:53,980 WARN [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:55,970 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:35:58,972 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:58,972 WARN [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:35:58,972 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:58,973 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:58,973 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:58,973 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:58,973 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:35:58,973 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246543937 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246548965 2024-11-22T03:35:58,974 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:35:58,974 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246543937 is not closed yet, will try archiving it next time 2024-11-22T03:35:58,974 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C44135%2C1732246488301:(num 1732246548965) roll requested 2024-11-22T03:35:58,975 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246558975 2024-11-22T03:35:58,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741842_1018 (size=1569) 2024-11-22T03:35:58,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741842_1018 (size=1569) 2024-11-22T03:35:58,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/929709fe3b1c401aa7cbcf126ef76e95 is 1080, key is row0015/info:/1732246530676/Put/seqid=0 2024-11-22T03:35:58,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741844_1020 (size=12509) 2024-11-22T03:35:58,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741844_1020 (size=12509) 2024-11-22T03:35:58,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/929709fe3b1c401aa7cbcf126ef76e95 2024-11-22T03:35:59,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/929709fe3b1c401aa7cbcf126ef76e95 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/929709fe3b1c401aa7cbcf126ef76e95 2024-11-22T03:35:59,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/929709fe3b1c401aa7cbcf126ef76e95, entries=7, sequenceid=31, filesize=12.2 K 2024-11-22T03:36:03,985 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:36:03,985 WARN [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], 
DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:36:04,022 INFO [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:36:04,022 WARN [FSHLog-0-hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4-prefix:b458937b0f5f,44135,1732246488301 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36143,DS-52bf6e0c-c5f9-4a72-85b4-a6aa74399428,DISK], DatanodeInfoWithStorage[127.0.0.1:37329,DS-5a87e29a-0a99-45d9-b381-dae53e8839fb,DISK]] 2024-11-22T03:36:04,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 65fff0f10b766103f11a53a6ec2c07df in 10053ms, sequenceid=31, compaction requested=true 2024-11-22T03:36:04,022 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 65fff0f10b766103f11a53a6ec2c07df: 2024-11-22T03:36:04,022 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,022 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,023 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-22T03:36:04,023 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,023 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:04,023 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,023 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8 because midkey is the same as first or last row 2024-11-22T03:36:04,023 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246548965 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246558975 2024-11-22T03:36:04,024 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:36:04,024 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246548965 is not closed yet, will try archiving it next time 2024-11-22T03:36:04,024 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246521626 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246521626 2024-11-22T03:36:04,024 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C44135%2C1732246488301:(num 1732246564024) roll requested 2024-11-22T03:36:04,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 65fff0f10b766103f11a53a6ec2c07df:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:36:04,025 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C44135%2C1732246488301.1732246564024 2024-11-22T03:36:04,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741843_1019 (size=438) 2024-11-22T03:36:04,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741843_1019 (size=438) 2024-11-22T03:36:04,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:36:04,028 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246533117 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246533117 2024-11-22T03:36:04,030 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:36:04,030 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246543937 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246543937 2024-11-22T03:36:04,032 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246548965 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246548965 2024-11-22T03:36:04,033 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:36:04,035 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.HStore(1541): 65fff0f10b766103f11a53a6ec2c07df/info is initiating minor compaction (all files) 2024-11-22T03:36:04,035 INFO [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 65fff0f10b766103f11a53a6ec2c07df/info in TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 
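The selection size reported by ExploringCompactionPolicy lines up with the three flushed store files: each HFile was stored as a 12,509-byte block, and 3 x 12,509 = 37,527 bytes, i.e. the "totalSize=36.6 K" of the minor compaction that follows. A trivial check:

// Quick check of the compaction-selection size logged above; file sizes are the
// 12509-byte block sizes reported for the three flushed HFiles.
public class CompactionSelectionSize {
  public static void main(String[] args) {
    long[] storeFiles = {12_509, 12_509, 12_509};
    long total = 0;
    for (long size : storeFiles) total += size;
    System.out.printf("selected %d files, %d bytes (%.1f K)%n",
        storeFiles.length, total, total / 1024.0); // 3 files, 37527 bytes (36.6 K)
  }
}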
2024-11-22T03:36:04,036 INFO [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/4ab9b7e3bdbc4af4850978141c1d5b70, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/929709fe3b1c401aa7cbcf126ef76e95] into tmpdir=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp, totalSize=36.6 K 2024-11-22T03:36:04,037 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] compactions.Compactor(225): Compacting dfbe197ab3d946e6ae91923f98de60d8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732246501554 2024-11-22T03:36:04,039 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ab9b7e3bdbc4af4850978141c1d5b70, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732246515615 2024-11-22T03:36:04,040 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] compactions.Compactor(225): Compacting 929709fe3b1c401aa7cbcf126ef76e95, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732246530676 2024-11-22T03:36:04,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,044 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246558975 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246564024 2024-11-22T03:36:04,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741845_1021 (size=93) 2024-11-22T03:36:04,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741845_1021 (size=93) 2024-11-22T03:36:04,062 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-22T03:36:04,062 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246558975 is not closed yet, will try archiving it next time 2024-11-22T03:36:04,062 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
b458937b0f5f%2C44135%2C1732246488301.1732246564062 2024-11-22T03:36:04,072 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,072 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,072 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,072 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,072 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:04,073 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246564024 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246564062 2024-11-22T03:36:04,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741846_1022 (size=1258) 2024-11-22T03:36:04,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741846_1022 (size=1258) 2024-11-22T03:36:04,077 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246558975 is not closed yet, will try archiving it next time 2024-11-22T03:36:04,078 INFO [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 65fff0f10b766103f11a53a6ec2c07df#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:36:04,079 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/eae909e73ec84115af3f3203833caee4 is 1080, key is row0001/info:/1732246501554/Put/seqid=0 2024-11-22T03:36:04,081 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:43111:43111)] 2024-11-22T03:36:04,081 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246558975 is not closed yet, will try archiving it next time 2024-11-22T03:36:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741848_1024 (size=27710) 2024-11-22T03:36:04,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741848_1024 (size=27710) 2024-11-22T03:36:04,097 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/eae909e73ec84115af3f3203833caee4 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/eae909e73ec84115af3f3203833caee4 2024-11-22T03:36:04,115 INFO [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 65fff0f10b766103f11a53a6ec2c07df/info of 65fff0f10b766103f11a53a6ec2c07df into eae909e73ec84115af3f3203833caee4(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
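The split-policy lines around the compaction are worth unpacking. The "sizeToCheck=16.0 K" with regionsWithCommonTable=1 is consistent with a threshold that grows with the cube of the table's region count on this server, seeded at twice the memstore flush size; the 8 KB flush size and the file-size cap in the sketch below are assumptions about this test's configuration, which the log itself does not show.

// Back-of-the-envelope check of the split-size threshold logged above. This mirrors
// the shape of IncreasingToUpperBoundRegionSplitPolicy (threshold grows with the cube
// of the region count, capped by the max file size), but the flush size and cap used
// here are assumptions about the test configuration.
public class SplitThresholdCheck {
  public static void main(String[] args) {
    long assumedFlushSize = 8 * 1024;        // assumption: small flush size for the test
    long assumedMaxFileSize = 64L * 1024;    // assumption: capped far below production defaults
    int regionsWithCommonTable = 1;          // "regionsWithCommonTable=1"
    long initialSize = 2 * assumedFlushSize; // 16 K
    long sizeToCheck = Math.min(assumedMaxFileSize,
        initialSize * (long) Math.pow(regionsWithCommonTable, 3));
    System.out.printf("sizeToCheck = %.1f K%n", sizeToCheck / 1024.0); // 16.0 K, as logged
  }
}

The size check passes (sumSize=27.1 K against 16.0 K), yet the split is vetoed each time: "midkey is the same as first or last row" indicates the compacted HFile is too small to yield a usable split point, most likely because all of its cells fit in a single data block whose index midkey coincides with the file's first key.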
2024-11-22T03:36:04,115 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 65fff0f10b766103f11a53a6ec2c07df: 2024-11-22T03:36:04,117 INFO [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df., storeName=65fff0f10b766103f11a53a6ec2c07df/info, priority=13, startTime=1732246564024; duration=0sec 2024-11-22T03:36:04,117 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T03:36:04,118 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:04,118 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/eae909e73ec84115af3f3203833caee4 because midkey is the same as first or last row 2024-11-22T03:36:04,118 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/eae909e73ec84115af3f3203833caee4 because midkey is the same as first or last row 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/eae909e73ec84115af3f3203833caee4 because midkey is the same as first or last row 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:36:04,119 DEBUG [RS:0;b458937b0f5f:44135-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 65fff0f10b766103f11a53a6ec2c07df:info 2024-11-22T03:36:04,449 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/WALs/b458937b0f5f,44135,1732246488301/b458937b0f5f%2C44135%2C1732246488301.1732246558975 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs/b458937b0f5f%2C44135%2C1732246488301.1732246558975 
2024-11-22T03:36:16,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44135 {}] regionserver.HRegion(8855): Flush requested on 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:36:16,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 65fff0f10b766103f11a53a6ec2c07df 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:36:16,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/5b04f059a6dc4133880fa0dd216b865b is 1080, key is row0022/info:/1732246564064/Put/seqid=0 2024-11-22T03:36:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741849_1025 (size=12509) 2024-11-22T03:36:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741849_1025 (size=12509) 2024-11-22T03:36:16,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/5b04f059a6dc4133880fa0dd216b865b 2024-11-22T03:36:16,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/5b04f059a6dc4133880fa0dd216b865b as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/5b04f059a6dc4133880fa0dd216b865b 2024-11-22T03:36:16,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/5b04f059a6dc4133880fa0dd216b865b, entries=7, sequenceid=42, filesize=12.2 K 2024-11-22T03:36:16,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 65fff0f10b766103f11a53a6ec2c07df in 40ms, sequenceid=42, compaction requested=false 2024-11-22T03:36:16,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 65fff0f10b766103f11a53a6ec2c07df: 2024-11-22T03:36:16,133 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-22T03:36:16,133 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:16,133 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/eae909e73ec84115af3f3203833caee4 because midkey is the same as first or last row 2024-11-22T03:36:16,589 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might 
because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:36:21,913 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 65fff0f10b766103f11a53a6ec2c07df, had cached 0 bytes from a total of 40219 2024-11-22T03:36:24,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:36:24,112 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:36:24,112 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:36:24,120 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:24,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:24,121 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T03:36:24,121 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:36:24,121 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=523817283, stopped=false 2024-11-22T03:36:24,121 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,36935,1732246487457 2024-11-22T03:36:24,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:24,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:24,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:24,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:24,165 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:36:24,166 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
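The shutdown sequence above is coordinated through ZooKeeper: deleting the /hbase/running znode produces the NodeDeleted events seen by both the master and region-server watchers, after which each re-registers its watch and begins stopping. The sketch below illustrates that mechanism with the plain ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the quorum address is the one from the log, everything else is illustrative.

// Illustrative sketch of the shutdown signal visible above: watch /hbase/running
// and treat its deletion as "cluster shutdown requested". Uses the plain ZooKeeper
// client for clarity; HBase's ZKWatcher wraps the same mechanism.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatcher {
  public static void main(String[] args) throws Exception {
    CountDownLatch shutdownSignal = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        shutdownSignal.countDown(); // corresponds to the NodeDeleted events logged above
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50067", 30_000, watcher);
    // Register a watch; HBase similarly logs "Set watcher on znode that does not
    // yet exist, /hbase/running" after the delete.
    zk.exists("/hbase/running", watcher);
    shutdownSignal.await();
    zk.close();
  }
}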
2024-11-22T03:36:24,166 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:36:24,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:24,166 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:24,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:24,167 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,44135,1732246488301' ***** 2024-11-22T03:36:24,167 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:36:24,167 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:36:24,167 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:36:24,167 INFO [RS:0;b458937b0f5f:44135 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:36:24,167 INFO [RS:0;b458937b0f5f:44135 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:36:24,168 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(3091): Received CLOSE for 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:36:24,168 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,44135,1732246488301 2024-11-22T03:36:24,168 INFO [RS:0;b458937b0f5f:44135 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:36:24,168 INFO [RS:0;b458937b0f5f:44135 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:44135. 2024-11-22T03:36:24,169 DEBUG [RS:0;b458937b0f5f:44135 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:36:24,169 DEBUG [RS:0;b458937b0f5f:44135 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:24,169 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 65fff0f10b766103f11a53a6ec2c07df, disabling compactions & flushes 2024-11-22T03:36:24,169 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:36:24,169 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:36:24,169 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:36:24,169 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:36:24,169 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. after waiting 0 ms 2024-11-22T03:36:24,169 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:36:24,169 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:36:24,169 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:36:24,169 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 65fff0f10b766103f11a53a6ec2c07df 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-22T03:36:24,170 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:36:24,170 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:36:24,170 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 65fff0f10b766103f11a53a6ec2c07df=TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.} 2024-11-22T03:36:24,170 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:36:24,170 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:36:24,170 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:36:24,170 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:36:24,170 DEBUG [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 65fff0f10b766103f11a53a6ec2c07df 2024-11-22T03:36:24,170 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-22T03:36:24,175 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/d7b10d4c77e145009e39eb29ea0de444 is 1080, key is row0029/info:/1732246578096/Put/seqid=0 2024-11-22T03:36:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741850_1026 (size=8193) 2024-11-22T03:36:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741850_1026 (size=8193) 2024-11-22T03:36:24,186 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/d7b10d4c77e145009e39eb29ea0de444 2024-11-22T03:36:24,192 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/info/4589050975d74cb88580c8ff5240a626 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df./info:regioninfo/1732246491937/Put/seqid=0 2024-11-22T03:36:24,196 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/.tmp/info/d7b10d4c77e145009e39eb29ea0de444 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/d7b10d4c77e145009e39eb29ea0de444 2024-11-22T03:36:24,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741851_1027 (size=7016) 2024-11-22T03:36:24,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741851_1027 (size=7016) 2024-11-22T03:36:24,198 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/info/4589050975d74cb88580c8ff5240a626 2024-11-22T03:36:24,205 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/d7b10d4c77e145009e39eb29ea0de444, entries=3, sequenceid=48, filesize=8.0 K 2024-11-22T03:36:24,207 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 65fff0f10b766103f11a53a6ec2c07df in 37ms, sequenceid=48, compaction requested=true 2024-11-22T03:36:24,207 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/4ab9b7e3bdbc4af4850978141c1d5b70, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/929709fe3b1c401aa7cbcf126ef76e95] to archive 2024-11-22T03:36:24,210 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:36:24,213 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/dfbe197ab3d946e6ae91923f98de60d8 2024-11-22T03:36:24,215 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/4ab9b7e3bdbc4af4850978141c1d5b70 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/4ab9b7e3bdbc4af4850978141c1d5b70 2024-11-22T03:36:24,217 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/929709fe3b1c401aa7cbcf126ef76e95 to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/info/929709fe3b1c401aa7cbcf126ef76e95 2024-11-22T03:36:24,222 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/ns/2a1bfca15e12482c9d4fd0fabb97bfbf is 43, key is default/ns:d/1732246491107/Put/seqid=0 2024-11-22T03:36:24,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741852_1028 (size=5153) 2024-11-22T03:36:24,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741852_1028 (size=5153) 
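[editor note] The StoreCloser entries above show compacted store files being moved from the table's data directory to the parallel location under archive/ by HFileArchiver. The sketch below reproduces only that path mapping with Hadoop's FileSystem API; the helper names and the plain rename are illustrative and are not HBase's actual archiving code, which adds existence checks and retries.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative mapping: <root>/data/<ns>/<table>/<region>/<family>/<hfile>
//                   ->  <root>/archive/data/<ns>/<table>/<region>/<family>/<hfile>
public final class ArchivePathSketch {
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    // Assumes storeFile lives under rootDir, as in the log entries above.
    String relative = file.substring(root.length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path target = toArchivePath(rootDir, storeFile);
    fs.mkdirs(target.getParent());
    fs.rename(storeFile, target); // the real HFileArchiver logic is more involved
  }
}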
2024-11-22T03:36:24,230 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/ns/2a1bfca15e12482c9d4fd0fabb97bfbf 2024-11-22T03:36:24,232 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b458937b0f5f:36935 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T03:36:24,237 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [dfbe197ab3d946e6ae91923f98de60d8=12509, 4ab9b7e3bdbc4af4850978141c1d5b70=12509, 929709fe3b1c401aa7cbcf126ef76e95=12509] 2024-11-22T03:36:24,243 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/default/TestLogRolling-testSlowSyncLogRolling/65fff0f10b766103f11a53a6ec2c07df/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-22T03:36:24,245 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:36:24,245 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 65fff0f10b766103f11a53a6ec2c07df: Waiting for close lock at 1732246584168Running coprocessor pre-close hooks at 1732246584169 (+1 ms)Disabling compacts and flushes for region at 1732246584169Disabling writes for close at 1732246584169Obtaining lock to block concurrent updates at 1732246584169Preparing flush snapshotting stores in 65fff0f10b766103f11a53a6ec2c07df at 1732246584169Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732246584170 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 
at 1732246584171 (+1 ms)Flushing 65fff0f10b766103f11a53a6ec2c07df/info: creating writer at 1732246584171Flushing 65fff0f10b766103f11a53a6ec2c07df/info: appending metadata at 1732246584175 (+4 ms)Flushing 65fff0f10b766103f11a53a6ec2c07df/info: closing flushed file at 1732246584175Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f26ef4: reopening flushed file at 1732246584195 (+20 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 65fff0f10b766103f11a53a6ec2c07df in 37ms, sequenceid=48, compaction requested=true at 1732246584207 (+12 ms)Writing region close event to WAL at 1732246584238 (+31 ms)Running coprocessor post-close hooks at 1732246584244 (+6 ms)Closed at 1732246584245 (+1 ms) 2024-11-22T03:36:24,246 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732246491391.65fff0f10b766103f11a53a6ec2c07df. 2024-11-22T03:36:24,258 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/table/b8bc3d0e71da416ba9711e228794534f is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732246491955/Put/seqid=0 2024-11-22T03:36:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741853_1029 (size=5396) 2024-11-22T03:36:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741853_1029 (size=5396) 2024-11-22T03:36:24,265 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/table/b8bc3d0e71da416ba9711e228794534f 2024-11-22T03:36:24,273 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/info/4589050975d74cb88580c8ff5240a626 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/info/4589050975d74cb88580c8ff5240a626 2024-11-22T03:36:24,281 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/info/4589050975d74cb88580c8ff5240a626, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T03:36:24,282 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/ns/2a1bfca15e12482c9d4fd0fabb97bfbf as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/ns/2a1bfca15e12482c9d4fd0fabb97bfbf 2024-11-22T03:36:24,289 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:36:24,289 INFO 
[regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:36:24,290 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:36:24,292 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/ns/2a1bfca15e12482c9d4fd0fabb97bfbf, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:36:24,294 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/.tmp/table/b8bc3d0e71da416ba9711e228794534f as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/table/b8bc3d0e71da416ba9711e228794534f 2024-11-22T03:36:24,305 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/table/b8bc3d0e71da416ba9711e228794534f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T03:36:24,307 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-11-22T03:36:24,313 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:36:24,315 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:36:24,315 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:36:24,315 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246584170Running coprocessor pre-close hooks at 1732246584170Disabling compacts and flushes for region at 1732246584170Disabling writes for close at 1732246584170Obtaining lock to block concurrent updates at 1732246584170Preparing flush snapshotting stores in 1588230740 at 1732246584170Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732246584171 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732246584171Flushing 1588230740/info: creating writer at 1732246584172 (+1 ms)Flushing 1588230740/info: appending metadata at 1732246584191 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732246584191Flushing 1588230740/ns: creating writer at 1732246584205 (+14 ms)Flushing 1588230740/ns: appending metadata at 1732246584221 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732246584221Flushing 1588230740/table: creating writer at 1732246584238 (+17 ms)Flushing 
1588230740/table: appending metadata at 1732246584258 (+20 ms)Flushing 1588230740/table: closing flushed file at 1732246584258Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@90bc4dc: reopening flushed file at 1732246584272 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d881ae4: reopening flushed file at 1732246584281 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5875e86f: reopening flushed file at 1732246584292 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1732246584307 (+15 ms)Writing region close event to WAL at 1732246584308 (+1 ms)Running coprocessor post-close hooks at 1732246584315 (+7 ms)Closed at 1732246584315 2024-11-22T03:36:24,316 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:36:24,371 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,44135,1732246488301; all regions closed. 2024-11-22T03:36:24,374 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,374 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,375 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,376 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741834_1010 (size=3066) 2024-11-22T03:36:24,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741834_1010 (size=3066) 2024-11-22T03:36:24,386 DEBUG [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs 2024-11-22T03:36:24,386 INFO [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C44135%2C1732246488301.meta:.meta(num 1732246490831) 2024-11-22T03:36:24,387 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,387 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,387 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,387 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741847_1023 (size=12695) 2024-11-22T03:36:24,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741847_1023 (size=12695) 2024-11-22T03:36:24,394 DEBUG [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/oldWALs 2024-11-22T03:36:24,395 INFO [RS:0;b458937b0f5f:44135 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C44135%2C1732246488301:(num 1732246564062) 2024-11-22T03:36:24,395 DEBUG [RS:0;b458937b0f5f:44135 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:24,395 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.LeaseManager(133): 
Closed leases 2024-11-22T03:36:24,395 INFO [RS:0;b458937b0f5f:44135 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:36:24,395 INFO [RS:0;b458937b0f5f:44135 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:36:24,395 INFO [RS:0;b458937b0f5f:44135 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:36:24,395 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:36:24,396 INFO [RS:0;b458937b0f5f:44135 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44135 2024-11-22T03:36:24,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,44135,1732246488301 2024-11-22T03:36:24,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:36:24,420 INFO [RS:0;b458937b0f5f:44135 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:36:24,421 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,44135,1732246488301] 2024-11-22T03:36:24,441 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,44135,1732246488301 already deleted, retry=false 2024-11-22T03:36:24,442 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,44135,1732246488301 expired; onlineServers=0 2024-11-22T03:36:24,442 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,36935,1732246487457' ***** 2024-11-22T03:36:24,442 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:36:24,442 INFO [M:0;b458937b0f5f:36935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:36:24,442 INFO [M:0;b458937b0f5f:36935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:36:24,442 DEBUG [M:0;b458937b0f5f:36935 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:36:24,443 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
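[editor note] The chore-service shutdown above lists the region server's ScheduledChore instances (CompactionThroughputTuner, ReplicationSinkStatistics, ReplicationSourceStatistics) with their periods. For orientation, here is a minimal sketch of defining and scheduling a chore; the ScheduledChore(name, stopper, period) constructor, ChoreService(prefix), scheduleChore() and the Stoppable methods used here are assumptions inferred from these log lines, not signatures verified against this exact HBase version.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Sketch only; signatures assumed, see note above.
public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Runs chore() roughly every 60 seconds until stopped.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work goes here
      }
    };

    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);
    Thread.sleep(1_000);
    // Shutting the service down reports which chores it still held,
    // much like the "had [...] on shutdown" entry above.
    service.shutdown();
  }
}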
2024-11-22T03:36:24,443 DEBUG [M:0;b458937b0f5f:36935 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:36:24,443 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246490097 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246490097,5,FailOnTimeoutGroup] 2024-11-22T03:36:24,443 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246490097 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246490097,5,FailOnTimeoutGroup] 2024-11-22T03:36:24,443 INFO [M:0;b458937b0f5f:36935 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:36:24,443 INFO [M:0;b458937b0f5f:36935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:36:24,443 DEBUG [M:0;b458937b0f5f:36935 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:36:24,443 INFO [M:0;b458937b0f5f:36935 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:36:24,443 INFO [M:0;b458937b0f5f:36935 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:36:24,444 INFO [M:0;b458937b0f5f:36935 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:36:24,444 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:36:24,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:36:24,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:24,452 DEBUG [M:0;b458937b0f5f:36935 {}] zookeeper.ZKUtil(347): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:36:24,452 WARN [M:0;b458937b0f5f:36935 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:36:24,453 INFO [M:0;b458937b0f5f:36935 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/.lastflushedseqids 2024-11-22T03:36:24,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741854_1030 (size=130) 2024-11-22T03:36:24,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741854_1030 (size=130) 2024-11-22T03:36:24,465 INFO [M:0;b458937b0f5f:36935 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:36:24,465 INFO [M:0;b458937b0f5f:36935 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:36:24,465 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:36:24,465 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:24,465 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:24,465 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:36:24,465 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:24,466 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-22T03:36:24,485 DEBUG [M:0;b458937b0f5f:36935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0651cf90ce994f55a6b9009018b26172 is 82, key is hbase:meta,,1/info:regioninfo/1732246490936/Put/seqid=0 2024-11-22T03:36:24,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741855_1031 (size=5672) 2024-11-22T03:36:24,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741855_1031 (size=5672) 2024-11-22T03:36:24,492 INFO [M:0;b458937b0f5f:36935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0651cf90ce994f55a6b9009018b26172 2024-11-22T03:36:24,519 DEBUG [M:0;b458937b0f5f:36935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5978dac792f34bd486ce268e32881a40 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732246491966/Put/seqid=0 2024-11-22T03:36:24,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741856_1032 (size=6247) 2024-11-22T03:36:24,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741856_1032 (size=6247) 2024-11-22T03:36:24,531 INFO [M:0;b458937b0f5f:36935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5978dac792f34bd486ce268e32881a40 2024-11-22T03:36:24,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:36:24,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44135-0x101609bba170001, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-22T03:36:24,532 INFO [RS:0;b458937b0f5f:44135 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:36:24,532 INFO [RS:0;b458937b0f5f:44135 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,44135,1732246488301; zookeeper connection closed. 2024-11-22T03:36:24,532 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9ed3c14 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9ed3c14 2024-11-22T03:36:24,533 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:36:24,541 INFO [M:0;b458937b0f5f:36935 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5978dac792f34bd486ce268e32881a40 2024-11-22T03:36:24,563 DEBUG [M:0;b458937b0f5f:36935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c112fd4eb536472ea5f03f7d246b0905 is 69, key is b458937b0f5f,44135,1732246488301/rs:state/1732246490182/Put/seqid=0 2024-11-22T03:36:24,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741857_1033 (size=5156) 2024-11-22T03:36:24,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741857_1033 (size=5156) 2024-11-22T03:36:24,571 INFO [M:0;b458937b0f5f:36935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c112fd4eb536472ea5f03f7d246b0905 2024-11-22T03:36:24,602 DEBUG [M:0;b458937b0f5f:36935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e821190f6b83454c96ac1659349d2bcb is 52, key is load_balancer_on/state:d/1732246491362/Put/seqid=0 2024-11-22T03:36:24,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741858_1034 (size=5056) 2024-11-22T03:36:24,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741858_1034 (size=5056) 2024-11-22T03:36:24,611 INFO [M:0;b458937b0f5f:36935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e821190f6b83454c96ac1659349d2bcb 2024-11-22T03:36:24,619 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0651cf90ce994f55a6b9009018b26172 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0651cf90ce994f55a6b9009018b26172 2024-11-22T03:36:24,627 INFO [M:0;b458937b0f5f:36935 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0651cf90ce994f55a6b9009018b26172, entries=8, sequenceid=59, filesize=5.5 K 2024-11-22T03:36:24,628 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5978dac792f34bd486ce268e32881a40 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5978dac792f34bd486ce268e32881a40 2024-11-22T03:36:24,635 INFO [M:0;b458937b0f5f:36935 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5978dac792f34bd486ce268e32881a40 2024-11-22T03:36:24,636 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5978dac792f34bd486ce268e32881a40, entries=6, sequenceid=59, filesize=6.1 K 2024-11-22T03:36:24,637 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c112fd4eb536472ea5f03f7d246b0905 as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c112fd4eb536472ea5f03f7d246b0905 2024-11-22T03:36:24,644 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c112fd4eb536472ea5f03f7d246b0905, entries=1, sequenceid=59, filesize=5.0 K 2024-11-22T03:36:24,645 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e821190f6b83454c96ac1659349d2bcb as hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e821190f6b83454c96ac1659349d2bcb 2024-11-22T03:36:24,653 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e821190f6b83454c96ac1659349d2bcb, entries=1, sequenceid=59, filesize=4.9 K 2024-11-22T03:36:24,654 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 189ms, sequenceid=59, compaction requested=false 2024-11-22T03:36:24,656 INFO [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:36:24,656 DEBUG [M:0;b458937b0f5f:36935 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246584465Disabling compacts and flushes for region at 1732246584465Disabling writes for close at 1732246584465Obtaining lock to block concurrent updates at 1732246584466 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246584466Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732246584466Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732246584467 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246584467Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246584484 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246584484Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246584499 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246584518 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246584519 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246584542 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246584562 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246584562Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246584581 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246584601 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246584601Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42725094: reopening flushed file at 1732246584618 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69fccdd1: reopening flushed file at 1732246584627 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d306e07: reopening flushed file at 1732246584636 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d0c150b: reopening flushed file at 1732246584644 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 189ms, sequenceid=59, compaction requested=false at 1732246584654 (+10 ms)Writing region close event to WAL at 1732246584656 (+2 ms)Closed at 1732246584656 2024-11-22T03:36:24,657 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,657 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,657 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,657 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,658 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:24,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36143 is added to blk_1073741830_1006 (size=27973) 2024-11-22T03:36:24,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37329 is added to blk_1073741830_1006 (size=27973) 2024-11-22T03:36:24,661 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:36:24,661 INFO [M:0;b458937b0f5f:36935 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:36:24,661 INFO [M:0;b458937b0f5f:36935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36935 2024-11-22T03:36:24,662 INFO [M:0;b458937b0f5f:36935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:36:24,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:36:24,773 INFO [M:0;b458937b0f5f:36935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:36:24,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36935-0x101609bba170000, quorum=127.0.0.1:50067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:36:24,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e34e63e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:24,782 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5300105e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:24,782 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:24,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65b328c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:24,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e06ea5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:24,786 WARN [BP-1930816406-172.17.0.3-1732246483340 heartbeating to localhost/127.0.0.1:35981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:24,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:36:24,786 WARN [BP-1930816406-172.17.0.3-1732246483340 heartbeating to localhost/127.0.0.1:35981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1930816406-172.17.0.3-1732246483340 (Datanode Uuid 2ef4eabc-106a-4bc4-9c27-c5b88a04f260) service to localhost/127.0.0.1:35981 2024-11-22T03:36:24,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:24,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data3/current/BP-1930816406-172.17.0.3-1732246483340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:24,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data4/current/BP-1930816406-172.17.0.3-1732246483340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:24,788 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:24,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1442ffa6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:24,794 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46cab4e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:24,794 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:24,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1493401e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:24,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198fe7a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:24,796 WARN [BP-1930816406-172.17.0.3-1732246483340 heartbeating to localhost/127.0.0.1:35981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:24,796 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
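[editor note] The ResourceChecker entry a few lines below compares the JVM thread count before and after the test (Thread=80, was 12) and dumps the stack of each potentially hanging thread. The snippet here is plain JDK code that produces a comparable dump; it is shown only to make that output easier to read and is not HBase's ResourceChecker implementation.

import java.util.Map;

// Generic JDK thread dump; ResourceChecker layers before/after accounting on top of this idea.
public final class ThreadDumpSketch {
  public static void main(String[] args) {
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("Live threads: " + stacks.size());
    for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
      System.out.println("Potentially hanging thread: " + entry.getKey().getName());
      for (StackTraceElement frame : entry.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}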
2024-11-22T03:36:24,796 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:24,796 WARN [BP-1930816406-172.17.0.3-1732246483340 heartbeating to localhost/127.0.0.1:35981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1930816406-172.17.0.3-1732246483340 (Datanode Uuid 2f766952-069e-4971-a304-82755b16f133) service to localhost/127.0.0.1:35981 2024-11-22T03:36:24,797 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data1/current/BP-1930816406-172.17.0.3-1732246483340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:24,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/cluster_aeeb5155-ad5b-8730-4b04-32c9067c52e8/data/data2/current/BP-1930816406-172.17.0.3-1732246483340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:24,798 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:24,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@595f45d4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:36:24,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1298d5a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:24,808 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:24,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d8eba1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:24,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73b23f80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:24,817 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:36:24,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:36:24,888 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/b458937b0f5f:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: master/b458937b0f5f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@2977aefe java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) 
app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:35981 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:35981 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending 
Thread for localhost/127.0.0.1:35981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/b458937b0f5f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:35981 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 286), ProcessCount=11 (was 11), AvailableMemoryMB=7276 (was 7641) 2024-11-22T03:36:24,910 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=80, OpenFileDescriptor=407, MaxFileDescriptor=1048576, SystemLoadAverage=207, ProcessCount=11, AvailableMemoryMB=7273 2024-11-22T03:36:24,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:36:24,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.log.dir so I do NOT create it in target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d 2024-11-22T03:36:24,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c1c30c7-2fd0-5c44-9cdb-85679ed712c8/hadoop.tmp.dir so I do NOT create it in target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d 2024-11-22T03:36:24,911 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4, deleteOnExit=true 2024-11-22T03:36:24,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:36:24,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/test.cache.data in system properties and HBase conf 2024-11-22T03:36:24,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:36:24,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:36:24,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:36:24,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:36:24,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:36:24,912 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:36:24,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:36:24,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:36:24,930 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:36:25,285 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:25,293 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:25,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:25,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:25,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:36:25,295 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:25,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25a1dc3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:25,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e4aa3d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:25,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f1f2e53{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/java.io.tmpdir/jetty-localhost-42147-hadoop-hdfs-3_4_1-tests_jar-_-any-16862336134240063396/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:36:25,399 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7427ee{HTTP/1.1, (http/1.1)}{localhost:42147} 2024-11-22T03:36:25,399 INFO [Time-limited test {}] server.Server(415): Started @104817ms 2024-11-22T03:36:25,415 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:36:25,889 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:25,894 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:25,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:25,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:25,895 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:36:25,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25910b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:25,896 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff1bba9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:25,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1deb503b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/java.io.tmpdir/jetty-localhost-35507-hadoop-hdfs-3_4_1-tests_jar-_-any-4138000253246759081/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:25,991 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40ece9b3{HTTP/1.1, (http/1.1)}{localhost:35507} 2024-11-22T03:36:25,991 INFO [Time-limited test {}] server.Server(415): Started @105409ms 2024-11-22T03:36:25,992 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:26,023 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:26,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:26,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:26,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:26,029 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:36:26,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a18f7ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:26,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a7eee1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:26,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60e14a6f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/java.io.tmpdir/jetty-localhost-34511-hadoop-hdfs-3_4_1-tests_jar-_-any-8635680262035630221/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:26,127 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c3e0707{HTTP/1.1, (http/1.1)}{localhost:34511} 2024-11-22T03:36:26,127 INFO [Time-limited test {}] server.Server(415): Started @105545ms 2024-11-22T03:36:26,129 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:27,372 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data1/current/BP-1688087102-172.17.0.3-1732246584942/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:27,372 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data2/current/BP-1688087102-172.17.0.3-1732246584942/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:27,391 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:27,393 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51a1555a60f8b24c with lease ID 0xd5a62c1cee72dd42: Processing first storage report for DS-b34d1f7b-7412-468a-9d51-11a67b1515a3 from datanode DatanodeRegistration(127.0.0.1:37671, datanodeUuid=2f946c36-2f75-4b27-bb81-20295e2e81b1, infoPort=42925, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942) 2024-11-22T03:36:27,393 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51a1555a60f8b24c with lease ID 0xd5a62c1cee72dd42: from storage DS-b34d1f7b-7412-468a-9d51-11a67b1515a3 node DatanodeRegistration(127.0.0.1:37671, datanodeUuid=2f946c36-2f75-4b27-bb81-20295e2e81b1, infoPort=42925, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:27,393 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51a1555a60f8b24c with lease ID 0xd5a62c1cee72dd42: Processing first storage report for DS-81ff8cac-b57f-429d-aaa1-c478b15dc94d from datanode DatanodeRegistration(127.0.0.1:37671, datanodeUuid=2f946c36-2f75-4b27-bb81-20295e2e81b1, infoPort=42925, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942) 2024-11-22T03:36:27,393 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51a1555a60f8b24c with lease ID 0xd5a62c1cee72dd42: from storage DS-81ff8cac-b57f-429d-aaa1-c478b15dc94d node DatanodeRegistration(127.0.0.1:37671, datanodeUuid=2f946c36-2f75-4b27-bb81-20295e2e81b1, infoPort=42925, infoSecurePort=0, ipcPort=39873, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:27,517 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data3/current/BP-1688087102-172.17.0.3-1732246584942/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:27,517 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data4/current/BP-1688087102-172.17.0.3-1732246584942/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:27,532 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:27,535 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd057d88c7389d68e with lease ID 0xd5a62c1cee72dd43: Processing first storage report for DS-8bd70877-a980-4ec7-8074-4d9ba1dcb295 from datanode DatanodeRegistration(127.0.0.1:45531, datanodeUuid=df570608-37e0-4598-8e79-a80b72671ee7, infoPort=44467, infoSecurePort=0, ipcPort=35509, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942) 2024-11-22T03:36:27,535 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd057d88c7389d68e with lease ID 0xd5a62c1cee72dd43: from storage DS-8bd70877-a980-4ec7-8074-4d9ba1dcb295 node DatanodeRegistration(127.0.0.1:45531, datanodeUuid=df570608-37e0-4598-8e79-a80b72671ee7, infoPort=44467, infoSecurePort=0, ipcPort=35509, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:27,535 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd057d88c7389d68e with lease ID 0xd5a62c1cee72dd43: Processing first storage report for DS-567eed6b-7858-44dd-ab58-dcde841c33c6 from datanode DatanodeRegistration(127.0.0.1:45531, datanodeUuid=df570608-37e0-4598-8e79-a80b72671ee7, infoPort=44467, infoSecurePort=0, ipcPort=35509, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942) 2024-11-22T03:36:27,535 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd057d88c7389d68e with lease ID 0xd5a62c1cee72dd43: from storage DS-567eed6b-7858-44dd-ab58-dcde841c33c6 node DatanodeRegistration(127.0.0.1:45531, datanodeUuid=df570608-37e0-4598-8e79-a80b72671ee7, infoPort=44467, infoSecurePort=0, ipcPort=35509, storageInfo=lv=-57;cid=testClusterID;nsid=1447403561;c=1732246584942), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:27,578 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d 2024-11-22T03:36:27,581 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/zookeeper_0, clientPort=64019, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:36:27,582 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64019 2024-11-22T03:36:27,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,584 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:36:27,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:36:27,595 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb with version=8 2024-11-22T03:36:27,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:36:27,598 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:36:27,598 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:36:27,599 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40693 2024-11-22T03:36:27,601 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40693 connecting to ZooKeeper ensemble=127.0.0.1:64019 2024-11-22T03:36:27,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406930x0, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:36:27,653 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40693-0x101609d44680000 connected 2024-11-22T03:36:27,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:27,758 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb, hbase.cluster.distributed=false 2024-11-22T03:36:27,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:36:27,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40693 2024-11-22T03:36:27,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40693 2024-11-22T03:36:27,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40693 2024-11-22T03:36:27,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40693 2024-11-22T03:36:27,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40693 2024-11-22T03:36:27,785 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:36:27,785 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:36:27,786 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37999 2024-11-22T03:36:27,787 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37999 connecting to ZooKeeper ensemble=127.0.0.1:64019 2024-11-22T03:36:27,788 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379990x0, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:36:27,801 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379990x0, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:27,801 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:36:27,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37999-0x101609d44680001 connected 2024-11-22T03:36:27,805 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:36:27,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:36:27,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:36:27,807 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37999 2024-11-22T03:36:27,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37999 2024-11-22T03:36:27,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37999 2024-11-22T03:36:27,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37999 2024-11-22T03:36:27,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37999 2024-11-22T03:36:27,833 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:40693 2024-11-22T03:36:27,834 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,40693,1732246587597 2024-11-22T03:36:27,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:27,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:27,843 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/b458937b0f5f,40693,1732246587597 2024-11-22T03:36:27,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:27,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:36:27,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:27,854 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:36:27,854 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,40693,1732246587597 from backup master directory 2024-11-22T03:36:27,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:27,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b458937b0f5f,40693,1732246587597 2024-11-22T03:36:27,863 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T03:36:27,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:27,864 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,40693,1732246587597 2024-11-22T03:36:27,869 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/hbase.id] with ID: bfe80d12-3906-4d76-a464-ffa836e02ee8 2024-11-22T03:36:27,869 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/.tmp/hbase.id 2024-11-22T03:36:27,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:36:27,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:36:27,881 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/.tmp/hbase.id]:[hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/hbase.id] 2024-11-22T03:36:27,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:36:27,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:36:27,893 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:36:27,894 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:36:27,897 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:27,897 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:36:27,900 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
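The entries up to this point come from HBase's test harness standing up a throwaway mini cluster: HBaseTestingUtil rewrites the YARN/DFS directories into a per-run test-data folder, starts a mini HDFS (the Jetty/DataNode/block-report entries), a MiniZooKeeperCluster on clientPort=64019, and then an HMaster and a region server. As a rough, hedged sketch only — not this test's actual source — a driver producing this startup sequence would typically look like the following, assuming the HBase 3.x HBaseTestingUtil keeps the startMiniCluster()/shutdownMiniCluster() methods of the older HBaseTestingUtility (class and names below are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // HBaseTestingUtil redirects data/log directories into a per-run test-data
        // folder and mirrors the values "in system properties and HBase conf",
        // which is what the HBaseTestingUtil(751) entries above record.
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        try {
          // Brings up mini DFS, a MiniZooKeeperCluster and an HBase master plus
          // region server, producing startup entries like those seen in this log.
          util.startMiniCluster();
          // ... test body would run against the mini cluster here ...
        } finally {
          util.shutdownMiniCluster(); // tear the mini cluster down after the test
        }
      }
    }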
2024-11-22T03:36:27,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:27,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:27,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:36:27,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:36:27,923 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:36:27,924 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:36:27,924 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:27,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:36:27,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:36:27,934 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store 2024-11-22T03:36:27,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:36:27,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:36:27,942 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:27,942 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:36:27,942 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:27,942 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:27,942 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:36:27,943 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:27,943 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:36:27,943 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246587942Disabling compacts and flushes for region at 1732246587942Disabling writes for close at 1732246587942Writing region close event to WAL at 1732246587943 (+1 ms)Closed at 1732246587943 2024-11-22T03:36:27,944 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/.initializing 2024-11-22T03:36:27,944 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/WALs/b458937b0f5f,40693,1732246587597 2024-11-22T03:36:27,947 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C40693%2C1732246587597, suffix=, logDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/WALs/b458937b0f5f,40693,1732246587597, archiveDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/oldWALs, maxLogs=10 2024-11-22T03:36:27,948 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C40693%2C1732246587597.1732246587948 2024-11-22T03:36:27,955 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/WALs/b458937b0f5f,40693,1732246587597/b458937b0f5f%2C40693%2C1732246587597.1732246587948 2024-11-22T03:36:27,956 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44467:44467),(127.0.0.1/127.0.0.1:42925:42925)] 2024-11-22T03:36:27,957 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:36:27,957 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:27,957 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,957 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:36:27,962 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:27,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:27,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:36:27,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:27,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:27,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:36:27,970 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:27,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:27,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:36:27,973 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:27,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:27,974 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,979 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,979 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,982 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,982 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,983 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:36:27,984 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:27,987 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:36:27,988 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802440, jitterRate=0.02035534381866455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:36:27,989 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246587958Initializing all the Stores at 1732246587959 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246587959Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246587959Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246587959Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246587959Cleaning up temporary data from old regions at 1732246587982 (+23 ms)Region opened successfully at 1732246587989 (+7 ms) 2024-11-22T03:36:27,990 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:36:27,995 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49dbf5f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:36:27,996 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:36:27,996 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:36:27,996 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:36:27,996 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:36:27,997 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:36:27,997 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:36:27,997 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:36:28,000 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:36:28,001 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:36:28,011 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:36:28,011 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:36:28,012 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:36:28,021 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:36:28,022 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:36:28,023 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:36:28,032 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:36:28,034 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:36:28,042 DEBUG 
[master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:36:28,048 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:36:28,060 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:36:28,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:28,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:28,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,072 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,40693,1732246587597, sessionid=0x101609d44680000, setting cluster-up flag (Was=false) 2024-11-22T03:36:28,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,127 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:36:28,131 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,40693,1732246587597 2024-11-22T03:36:28,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,190 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:36:28,194 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,40693,1732246587597 2024-11-22T03:36:28,197 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:36:28,200 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:28,200 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:36:28,200 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:36:28,201 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,40693,1732246587597 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:36:28,203 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:28,203 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:28,203 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:28,203 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:28,203 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:36:28,204 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,204 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:36:28,204 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:36:28,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246618205 2024-11-22T03:36:28,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:36:28,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:36:28,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:36:28,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:36:28,205 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:36:28,206 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:36:28,206 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,206 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:28,206 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:36:28,206 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:36:28,206 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:36:28,207 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:36:28,207 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:36:28,207 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:36:28,207 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246588207,5,FailOnTimeoutGroup] 2024-11-22T03:36:28,208 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246588207,5,FailOnTimeoutGroup] 2024-11-22T03:36:28,208 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,208 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:36:28,208 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,208 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,208 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,208 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:36:28,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:36:28,219 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(746): ClusterId : bfe80d12-3906-4d76-a464-ffa836e02ee8 2024-11-22T03:36:28,219 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:36:28,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:36:28,221 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:36:28,221 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb 2024-11-22T03:36:28,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:36:28,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:36:28,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:28,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:36:28,233 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:36:28,233 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:36:28,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:36:28,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:36:28,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:36:28,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:36:28,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:36:28,238 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:36:28,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:36:28,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:36:28,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740 2024-11-22T03:36:28,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740 2024-11-22T03:36:28,243 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:36:28,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:36:28,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:36:28,244 DEBUG [RS:0;b458937b0f5f:37999 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6be0c5c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:36:28,244 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
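The FlushLargeStoresPolicy entry directly above states its fallback rule: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the per-family flush lower bound becomes the region's memstore flush heap size divided by the number of column families, which is how the 16.0 M figure arises for the four families of hbase:meta (and matches the flushSizeLowerBound=16777216 reported just below). The following is a minimal standalone Java sketch of that arithmetic only; it is not HBase's FlushLargeStoresPolicy code, and the 64 MB flush heap size is an assumption back-derived from the logged 16 MB result.

    // Illustrative sketch only: fallback lower bound = memstore flush heap size / number of families.
    public class FlushLowerBoundSketch {
        static long fallbackLowerBound(long memStoreFlushHeapSize, int numFamilies) {
            return memStoreFlushHeapSize / numFamilies;
        }

        public static void main(String[] args) {
            long flushHeapSize = 64L * 1024 * 1024; // assumed 64 MB, consistent with the logged 16.0 M result
            int families = 4;                       // hbase:meta families: info, ns, rep_barrier, table
            System.out.println(fallbackLowerBound(flushHeapSize, families)); // prints 16777216 (16 MB)
        }
    }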
2024-11-22T03:36:28,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:36:28,248 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:36:28,248 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862144, jitterRate=0.09627322852611542}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:36:28,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246588231Initializing all the Stores at 1732246588232 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246588232Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246588232Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246588232Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246588232Cleaning up temporary data from old regions at 1732246588244 (+12 ms)Region opened successfully at 1732246588249 (+5 ms) 2024-11-22T03:36:28,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:36:28,250 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:36:28,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:36:28,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:36:28,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:36:28,250 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:36:28,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246588250Disabling compacts and flushes for region at 1732246588250Disabling writes for close at 1732246588250Writing region close 
event to WAL at 1732246588250Closed at 1732246588250 2024-11-22T03:36:28,252 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:28,252 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:36:28,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:36:28,254 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:36:28,255 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:36:28,258 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:37999 2024-11-22T03:36:28,258 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:36:28,258 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:36:28,258 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T03:36:28,259 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,40693,1732246587597 with port=37999, startcode=1732246587784 2024-11-22T03:36:28,259 DEBUG [RS:0;b458937b0f5f:37999 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:36:28,262 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57335, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:36:28,262 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40693 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,262 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40693 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,265 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb 2024-11-22T03:36:28,265 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41987 2024-11-22T03:36:28,265 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:36:28,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:36:28,275 DEBUG [RS:0;b458937b0f5f:37999 {}] 
zookeeper.ZKUtil(111): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,275 WARN [RS:0;b458937b0f5f:37999 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:36:28,275 INFO [RS:0;b458937b0f5f:37999 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:28,275 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/WALs/b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,37999,1732246587784] 2024-11-22T03:36:28,279 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:36:28,283 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:36:28,283 INFO [RS:0;b458937b0f5f:37999 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:36:28,283 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,283 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:36:28,284 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:36:28,284 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:36:28,285 DEBUG [RS:0;b458937b0f5f:37999 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:36:28,289 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,289 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,289 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,289 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
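Several ScheduledChore entries above report a chore being enabled with a name, a period, and a time unit. The snippet below is a generic, self-contained Java sketch of running such a named periodic task with java.util.concurrent; it is not HBase's ChoreService or ScheduledChore implementation, and the name and period are copied from the CompactionChecker line above purely as an example.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Generic sketch of a periodic "chore": a named task run at a fixed period,
    // mirroring the (name, period, unit) triple the log lines report. Not HBase code.
    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            String name = "CompactionChecker"; // name and period taken from the log line above
            long periodMillis = 1000L;
            pool.scheduleAtFixedRate(
                () -> System.out.println("running chore " + name),
                0L, periodMillis, TimeUnit.MILLISECONDS);
            // pool.shutdown() would cancel the chore when the owning service stops.
        }
    }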
2024-11-22T03:36:28,289 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,289 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,37999,1732246587784-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:36:28,304 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:36:28,304 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,37999,1732246587784-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,304 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,304 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.Replication(171): b458937b0f5f,37999,1732246587784 started 2024-11-22T03:36:28,319 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,319 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,37999,1732246587784, RpcServer on b458937b0f5f/172.17.0.3:37999, sessionid=0x101609d44680001 2024-11-22T03:36:28,320 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:36:28,320 DEBUG [RS:0;b458937b0f5f:37999 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,320 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,37999,1732246587784' 2024-11-22T03:36:28,320 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:36:28,321 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:36:28,321 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:36:28,321 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:36:28,321 DEBUG [RS:0;b458937b0f5f:37999 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,321 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,37999,1732246587784' 2024-11-22T03:36:28,321 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:36:28,322 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:36:28,322 DEBUG [RS:0;b458937b0f5f:37999 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:36:28,322 INFO [RS:0;b458937b0f5f:37999 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:36:28,322 INFO [RS:0;b458937b0f5f:37999 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-22T03:36:28,405 WARN [b458937b0f5f:40693 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:36:28,426 INFO [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C37999%2C1732246587784, suffix=, logDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/WALs/b458937b0f5f,37999,1732246587784, archiveDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/oldWALs, maxLogs=32 2024-11-22T03:36:28,427 INFO [RS:0;b458937b0f5f:37999 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C37999%2C1732246587784.1732246588427 2024-11-22T03:36:28,435 INFO [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/WALs/b458937b0f5f,37999,1732246587784/b458937b0f5f%2C37999%2C1732246587784.1732246588427 2024-11-22T03:36:28,440 DEBUG [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42925:42925),(127.0.0.1/127.0.0.1:44467:44467)] 2024-11-22T03:36:28,656 DEBUG [b458937b0f5f:40693 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:36:28,658 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,663 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,37999,1732246587784, state=OPENING 2024-11-22T03:36:28,713 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:36:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:28,727 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:36:28,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:28,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:28,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,37999,1732246587784}] 2024-11-22T03:36:28,882 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:36:28,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48815, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:36:28,893 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:36:28,893 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:28,896 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C37999%2C1732246587784.meta, suffix=.meta, logDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/WALs/b458937b0f5f,37999,1732246587784, archiveDir=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/oldWALs, maxLogs=32 2024-11-22T03:36:28,898 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C37999%2C1732246587784.meta.1732246588898.meta 2024-11-22T03:36:28,905 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/WALs/b458937b0f5f,37999,1732246587784/b458937b0f5f%2C37999%2C1732246587784.meta.1732246588898.meta 2024-11-22T03:36:28,906 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42925:42925),(127.0.0.1/127.0.0.1:44467:44467)] 2024-11-22T03:36:28,907 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:36:28,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:36:28,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:36:28,908 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T03:36:28,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:36:28,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:28,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:36:28,908 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:36:28,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:36:28,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:36:28,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:36:28,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:36:28,913 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:36:28,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:36:28,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:28,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:36:28,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:36:28,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:28,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T03:36:28,916 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:36:28,917 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740 2024-11-22T03:36:28,918 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740 2024-11-22T03:36:28,920 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:36:28,920 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:36:28,921 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:36:28,922 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:36:28,923 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742090, jitterRate=-0.05638428032398224}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:36:28,923 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:36:28,924 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246588908Writing region info on filesystem at 1732246588908Initializing all the Stores at 1732246588909 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246588909Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246588910 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246588910Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246588910Cleaning up temporary data from old regions at 1732246588920 (+10 ms)Running coprocessor post-open hooks at 1732246588923 (+3 ms)Region opened successfully at 1732246588924 (+1 ms) 2024-11-22T03:36:28,926 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246588881 2024-11-22T03:36:28,928 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:36:28,928 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:36:28,930 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,931 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,37999,1732246587784, state=OPEN 2024-11-22T03:36:28,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:36:28,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:36:28,968 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,37999,1732246587784 2024-11-22T03:36:28,968 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:28,968 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:28,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:36:28,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,37999,1732246587784 in 241 msec 2024-11-22T03:36:28,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:36:28,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 720 msec 2024-11-22T03:36:28,976 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:28,976 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:36:28,978 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:36:28,978 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,37999,1732246587784, seqNum=-1] 2024-11-22T03:36:28,978 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:36:28,980 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60917, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:36:28,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 789 msec 2024-11-22T03:36:28,989 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246588989, completionTime=-1 2024-11-22T03:36:28,989 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:36:28,989 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:36:28,992 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:36:28,992 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246648992 2024-11-22T03:36:28,992 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246708992 2024-11-22T03:36:28,992 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:36:28,992 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40693,1732246587597-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,992 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40693,1732246587597-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,993 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40693,1732246587597-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,993 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:40693, period=300000, unit=MILLISECONDS is enabled. 
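[Editor's note] During the hbase:meta open logged above, FlushLargeStoresPolicy reported that hbase.hregion.percolumnfamilyflush.size.lower.bound was not set in the table descriptor and fell back to the memstore flush size divided by the number of families (16 MB). The following is only an illustrative sketch, in Java, of setting that property on a hypothetical user table through the standard TableDescriptorBuilder client API; the table and family names are made up and this is not part of the captured test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  // Builds a descriptor for a hypothetical table "t1" with a 16 MB per-family flush lower bound,
  // using the same property name FlushLargeStoresPolicy looks for in the log entry above.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024)) // 16777216, matching flushSizeLowerBound above
        .build();
  }
}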
2024-11-22T03:36:28,993 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,993 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:28,995 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:36:28,998 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.134sec 2024-11-22T03:36:28,998 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:36:28,998 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:36:28,998 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:36:28,998 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:36:28,999 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:36:28,999 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40693,1732246587597-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:36:28,999 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40693,1732246587597-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:36:29,003 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:36:29,003 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:36:29,003 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40693,1732246587597-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
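[Editor's note] The ChoreService entries above show the master scheduling its periodic background chores (balancer, catalog janitor, HBCK, MOB cleaners, and so on) with a name, a period and a time unit. As a rough illustration only, assuming the internal ScheduledChore/ChoreService classes referenced in those entries, a chore can be defined and scheduled as below; the Stoppable implementation here is hypothetical, real servers pass the HMaster or HRegionServer instance.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Hypothetical stopper used to signal the chore to stop.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService service = new ChoreService("sketch");
    // Runs chore() every 60 000 ms, comparable to the ClusterStatusChore period above.
    service.scheduleChore(new ScheduledChore("SketchChore", stopper, 60_000) {
      @Override
      protected void chore() {
        // periodic work would go here
      }
    });

    Thread.sleep(1_000);
    service.shutdown();
  }
}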
2024-11-22T03:36:29,019 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2393cf77, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:36:29,019 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,40693,-1 for getting cluster id 2024-11-22T03:36:29,020 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:36:29,022 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bfe80d12-3906-4d76-a464-ffa836e02ee8' 2024-11-22T03:36:29,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:36:29,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bfe80d12-3906-4d76-a464-ffa836e02ee8" 2024-11-22T03:36:29,024 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3e7eb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:36:29,024 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,40693,-1] 2024-11-22T03:36:29,025 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:36:29,025 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:29,028 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56182, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:36:29,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@485cfba8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:36:29,030 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:36:29,031 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,37999,1732246587784, seqNum=-1] 2024-11-22T03:36:29,032 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:36:29,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35196, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:36:29,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b458937b0f5f,40693,1732246587597 2024-11-22T03:36:29,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:29,041 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:36:29,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:36:29,041 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:36:29,041 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:36:29,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:29,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:29,042 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T03:36:29,042 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:36:29,042 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=972283204, stopped=false 2024-11-22T03:36:29,042 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,40693,1732246587597 2024-11-22T03:36:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:29,063 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:36:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:29,064 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
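[Editor's note] The call stack above shows where the shutdown originates: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the cluster. Purely as a sketch of that JUnit 4 lifecycle, assuming the HBaseTestingUtil start/shutdown methods visible in the trace; this is not the actual test source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, HDFS and an in-process HBase master plus region server,
    // producing startup output like the entries earlier in this log.
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes connections and shuts the cluster down; this is the call at the top of the stack above.
    util.shutdownMiniCluster();
  }

  @Test
  public void smoke() throws Exception {
    // A real test (e.g. WAL rolling on datanode death) would exercise the cluster here.
  }
}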
2024-11-22T03:36:29,064 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:36:29,064 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:29,064 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:29,064 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:29,064 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,37999,1732246587784' ***** 2024-11-22T03:36:29,064 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:36:29,065 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,37999,1732246587784 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:37999. 2024-11-22T03:36:29,065 DEBUG [RS:0;b458937b0f5f:37999 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:36:29,065 DEBUG [RS:0;b458937b0f5f:37999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:36:29,065 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:36:29,066 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T03:36:29,066 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:36:29,066 DEBUG [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T03:36:29,066 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:36:29,066 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:36:29,066 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:36:29,066 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:36:29,066 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:36:29,066 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T03:36:29,083 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/.tmp/ns/4d287a84b9134b41a2eabda4b8701942 is 43, key is default/ns:d/1732246588981/Put/seqid=0 2024-11-22T03:36:29,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741835_1011 (size=5153) 2024-11-22T03:36:29,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741835_1011 (size=5153) 2024-11-22T03:36:29,089 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/.tmp/ns/4d287a84b9134b41a2eabda4b8701942 2024-11-22T03:36:29,097 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/.tmp/ns/4d287a84b9134b41a2eabda4b8701942 as hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/ns/4d287a84b9134b41a2eabda4b8701942 2024-11-22T03:36:29,107 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/ns/4d287a84b9134b41a2eabda4b8701942, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T03:36:29,108 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-22T03:36:29,109 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:36:29,115 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T03:36:29,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:36:29,116 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:36:29,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246589066Running coprocessor pre-close hooks at 1732246589066Disabling compacts and flushes for region at 1732246589066Disabling writes for close at 1732246589066Obtaining lock to block concurrent updates at 1732246589066Preparing flush snapshotting stores in 1588230740 at 1732246589066Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732246589067 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732246589067Flushing 1588230740/ns: creating writer at 1732246589067Flushing 1588230740/ns: appending metadata at 1732246589082 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732246589082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@724b850b: reopening flushed file at 1732246589096 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1732246589108 (+12 ms)Writing region close event to WAL at 1732246589110 (+2 ms)Running coprocessor post-close hooks at 1732246589115 (+5 ms)Closed at 1732246589116 (+1 ms) 2024-11-22T03:36:29,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:36:29,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:29,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:29,266 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,37999,1732246587784; all regions closed. 
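[Editor's note] The close sequence above flushes the hbase:meta memstore (74 B in the 'ns' family) to a temporary HFile, commits it into the store directory and records the close journal. A flush of the same kind can also be requested explicitly through the public Admin API; a minimal sketch, assuming a reachable cluster described by the default client configuration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Forces the memstores of hbase:meta out to HFiles, the same kind of flush
      // the region close path performs in the entries above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}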
2024-11-22T03:36:29,266 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,267 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,267 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,267 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:36:29,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:36:29,272 DEBUG [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/oldWALs 2024-11-22T03:36:29,272 INFO [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C37999%2C1732246587784.meta:.meta(num 1732246588898) 2024-11-22T03:36:29,273 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,273 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,273 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,273 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,273 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:36:29,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:36:29,278 DEBUG [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/oldWALs 2024-11-22T03:36:29,278 INFO [RS:0;b458937b0f5f:37999 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C37999%2C1732246587784:(num 1732246588427) 2024-11-22T03:36:29,278 DEBUG [RS:0;b458937b0f5f:37999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:29,278 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:36:29,278 INFO [RS:0;b458937b0f5f:37999 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:36:29,278 INFO [RS:0;b458937b0f5f:37999 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:36:29,279 INFO [RS:0;b458937b0f5f:37999 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:36:29,279 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
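[Editor's note] The entries above show the region server closing both of its FSHLog WALs (the .meta WAL and the default WAL) and moving the finished files to the oldWALs directory, which is the rolling behaviour TestLogRolling exercises. A roll can also be requested on demand for a specific region server via the Admin API; a short sketch, reusing the server name exactly as it appears in this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // host,port,startcode triple as logged for this region server.
      ServerName rs = ServerName.valueOf("b458937b0f5f,37999,1732246587784");
      // Asks the region server to close its current WAL and start a new one;
      // the old file is later archived under .../oldWALs as in the entries above.
      admin.rollWALWriter(rs);
    }
  }
}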
2024-11-22T03:36:29,279 INFO [RS:0;b458937b0f5f:37999 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37999 2024-11-22T03:36:29,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,37999,1732246587784 2024-11-22T03:36:29,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:36:29,292 INFO [RS:0;b458937b0f5f:37999 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:36:29,302 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,37999,1732246587784] 2024-11-22T03:36:29,313 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,37999,1732246587784 already deleted, retry=false 2024-11-22T03:36:29,313 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,37999,1732246587784 expired; onlineServers=0 2024-11-22T03:36:29,313 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,40693,1732246587597' ***** 2024-11-22T03:36:29,313 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:36:29,313 INFO [M:0;b458937b0f5f:40693 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:36:29,314 INFO [M:0;b458937b0f5f:40693 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:36:29,314 DEBUG [M:0;b458937b0f5f:40693 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:36:29,314 DEBUG [M:0;b458937b0f5f:40693 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:36:29,314 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:36:29,314 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246588207 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246588207,5,FailOnTimeoutGroup] 2024-11-22T03:36:29,314 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246588207 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246588207,5,FailOnTimeoutGroup] 2024-11-22T03:36:29,315 INFO [M:0;b458937b0f5f:40693 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:36:29,315 INFO [M:0;b458937b0f5f:40693 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:36:29,316 DEBUG [M:0;b458937b0f5f:40693 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:36:29,316 INFO [M:0;b458937b0f5f:40693 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:36:29,316 INFO [M:0;b458937b0f5f:40693 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:36:29,316 INFO [M:0;b458937b0f5f:40693 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:36:29,317 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:36:29,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:36:29,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:29,327 DEBUG [M:0;b458937b0f5f:40693 {}] zookeeper.ZKUtil(347): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:36:29,327 WARN [M:0;b458937b0f5f:40693 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:36:29,328 INFO [M:0;b458937b0f5f:40693 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/.lastflushedseqids 2024-11-22T03:36:29,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741836_1012 (size=99) 2024-11-22T03:36:29,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741836_1012 (size=99) 2024-11-22T03:36:29,336 INFO [M:0;b458937b0f5f:40693 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:36:29,337 INFO [M:0;b458937b0f5f:40693 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:36:29,337 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:36:29,337 INFO [M:0;b458937b0f5f:40693 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:29,337 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:29,337 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:36:29,337 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:29,338 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T03:36:29,357 DEBUG [M:0;b458937b0f5f:40693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14fc4b86e644458cb47acab2fc2615f7 is 82, key is hbase:meta,,1/info:regioninfo/1732246588929/Put/seqid=0 2024-11-22T03:36:29,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741837_1013 (size=5672) 2024-11-22T03:36:29,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741837_1013 (size=5672) 2024-11-22T03:36:29,364 INFO [M:0;b458937b0f5f:40693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14fc4b86e644458cb47acab2fc2615f7 2024-11-22T03:36:29,385 DEBUG [M:0;b458937b0f5f:40693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76cf7664e9484e5eaa357f9ede4f0b8c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732246588988/Put/seqid=0 2024-11-22T03:36:29,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741838_1014 (size=5275) 2024-11-22T03:36:29,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741838_1014 (size=5275) 2024-11-22T03:36:29,391 INFO [M:0;b458937b0f5f:40693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76cf7664e9484e5eaa357f9ede4f0b8c 2024-11-22T03:36:29,402 INFO [RS:0;b458937b0f5f:37999 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:36:29,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:36:29,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37999-0x101609d44680001, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-22T03:36:29,403 INFO [RS:0;b458937b0f5f:37999 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,37999,1732246587784; zookeeper connection closed. 2024-11-22T03:36:29,403 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@74d8c576 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@74d8c576 2024-11-22T03:36:29,403 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:36:29,415 DEBUG [M:0;b458937b0f5f:40693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48d1d9ad460c4739805bcb4223f1d72d is 69, key is b458937b0f5f,37999,1732246587784/rs:state/1732246588263/Put/seqid=0 2024-11-22T03:36:29,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741839_1015 (size=5156) 2024-11-22T03:36:29,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741839_1015 (size=5156) 2024-11-22T03:36:29,421 INFO [M:0;b458937b0f5f:40693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48d1d9ad460c4739805bcb4223f1d72d 2024-11-22T03:36:29,443 DEBUG [M:0;b458937b0f5f:40693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0a81cb10e5ae43238e31f7198dc3a978 is 52, key is load_balancer_on/state:d/1732246589039/Put/seqid=0 2024-11-22T03:36:29,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741840_1016 (size=5056) 2024-11-22T03:36:29,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741840_1016 (size=5056) 2024-11-22T03:36:29,450 INFO [M:0;b458937b0f5f:40693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0a81cb10e5ae43238e31f7198dc3a978 2024-11-22T03:36:29,457 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14fc4b86e644458cb47acab2fc2615f7 as hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14fc4b86e644458cb47acab2fc2615f7 2024-11-22T03:36:29,463 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14fc4b86e644458cb47acab2fc2615f7, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T03:36:29,464 DEBUG 
[M:0;b458937b0f5f:40693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76cf7664e9484e5eaa357f9ede4f0b8c as hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/76cf7664e9484e5eaa357f9ede4f0b8c 2024-11-22T03:36:29,470 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/76cf7664e9484e5eaa357f9ede4f0b8c, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T03:36:29,472 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48d1d9ad460c4739805bcb4223f1d72d as hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48d1d9ad460c4739805bcb4223f1d72d 2024-11-22T03:36:29,480 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48d1d9ad460c4739805bcb4223f1d72d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T03:36:29,482 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0a81cb10e5ae43238e31f7198dc3a978 as hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0a81cb10e5ae43238e31f7198dc3a978 2024-11-22T03:36:29,491 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41987/user/jenkins/test-data/6fcae4ab-bc12-c90c-2303-f352fd297ffb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0a81cb10e5ae43238e31f7198dc3a978, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T03:36:29,493 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=29, compaction requested=false 2024-11-22T03:36:29,495 INFO [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:29,495 DEBUG [M:0;b458937b0f5f:40693 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246589337Disabling compacts and flushes for region at 1732246589337Disabling writes for close at 1732246589337Obtaining lock to block concurrent updates at 1732246589338 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246589338Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732246589338Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732246589339 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246589339Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246589357 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246589357Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246589369 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246589384 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246589384Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246589397 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246589415 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246589415Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246589427 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246589443 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246589443Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22938327: reopening flushed file at 1732246589456 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@179a7d32: reopening flushed file at 1732246589463 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d07af51: reopening flushed file at 1732246589470 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1626a7c3: reopening flushed file at 1732246589481 (+11 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=29, compaction requested=false at 1732246589493 (+12 ms)Writing region close event to WAL at 1732246589495 (+2 ms)Closed at 1732246589495 2024-11-22T03:36:29,496 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,496 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,496 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,496 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,496 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:29,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:36:29,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45531 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:36:29,500 INFO [M:0;b458937b0f5f:40693 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:36:29,500 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:36:29,500 INFO [M:0;b458937b0f5f:40693 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40693 2024-11-22T03:36:29,500 INFO [M:0;b458937b0f5f:40693 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:36:29,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:36:29,613 INFO [M:0;b458937b0f5f:40693 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:36:29,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40693-0x101609d44680000, quorum=127.0.0.1:64019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:36:29,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60e14a6f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:29,616 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c3e0707{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:29,616 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:29,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a7eee1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:29,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a18f7ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:29,617 WARN [BP-1688087102-172.17.0.3-1732246584942 heartbeating to localhost/127.0.0.1:41987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:29,617 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:36:29,617 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:29,617 WARN [BP-1688087102-172.17.0.3-1732246584942 heartbeating to localhost/127.0.0.1:41987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1688087102-172.17.0.3-1732246584942 (Datanode Uuid df570608-37e0-4598-8e79-a80b72671ee7) service to localhost/127.0.0.1:41987 2024-11-22T03:36:29,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data3/current/BP-1688087102-172.17.0.3-1732246584942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:29,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data4/current/BP-1688087102-172.17.0.3-1732246584942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:29,618 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:29,625 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1deb503b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:29,625 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40ece9b3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:29,626 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:29,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff1bba9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:29,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25910b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:29,628 WARN [BP-1688087102-172.17.0.3-1732246584942 heartbeating to localhost/127.0.0.1:41987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:29,628 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
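[Editor's note] Once the datanodes and their Jetty handlers are stopped, the harness reports the minicluster down and immediately starts a new one with the StartMiniClusterOption printed in the entries below (1 master, 1 region server, 2 datanodes, 1 ZooKeeper server). A sketch of building those options explicitly, assuming the StartMiniClusterOption builder and a startMiniCluster(StartMiniClusterOption) overload matching the logged output.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option values logged below; every other option keeps its default.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    // ... test body would run against the fresh cluster ...
    util.shutdownMiniCluster();
  }
}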
2024-11-22T03:36:29,628 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:29,628 WARN [BP-1688087102-172.17.0.3-1732246584942 heartbeating to localhost/127.0.0.1:41987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1688087102-172.17.0.3-1732246584942 (Datanode Uuid 2f946c36-2f75-4b27-bb81-20295e2e81b1) service to localhost/127.0.0.1:41987 2024-11-22T03:36:29,629 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data1/current/BP-1688087102-172.17.0.3-1732246584942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:29,629 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/cluster_5fb8c965-33ad-d855-9120-7982d7da1aa4/data/data2/current/BP-1688087102-172.17.0.3-1732246584942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:29,629 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:29,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f1f2e53{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:36:29,636 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7427ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:29,636 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:29,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e4aa3d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:29,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25a1dc3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:29,644 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:36:29,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:36:29,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:36:29,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.log.dir so I do NOT create it in target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f 2024-11-22T03:36:29,666 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/36bbe37e-3f05-fe13-65a7-7c43611c8c7d/hadoop.tmp.dir so I do NOT create it in target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6, deleteOnExit=true 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/test.cache.data in system properties and HBase conf 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:36:29,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:36:29,667 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:36:29,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:36:29,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:36:29,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:36:29,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:36:29,681 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:36:29,769 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:36:29,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:29,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:29,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:29,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:30,047 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:30,052 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:30,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:30,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:30,054 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:36:30,054 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:30,055 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28c5c44c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:30,055 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c23a77f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:30,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@657d9fe5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-33509-hadoop-hdfs-3_4_1-tests_jar-_-any-9943161740921351170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:36:30,149 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@77a48b4f{HTTP/1.1, (http/1.1)}{localhost:33509} 2024-11-22T03:36:30,149 INFO [Time-limited test {}] server.Server(415): Started @109566ms 2024-11-22T03:36:30,160 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:36:30,289 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:36:30,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:30,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:30,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:30,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:30,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:36:30,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cadc2ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:30,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@720bc7eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:30,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31ec175a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-41705-hadoop-hdfs-3_4_1-tests_jar-_-any-10662520219350334045/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:30,520 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a37e627{HTTP/1.1, (http/1.1)}{localhost:41705} 2024-11-22T03:36:30,521 INFO [Time-limited test {}] server.Server(415): Started @109938ms 2024-11-22T03:36:30,522 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:30,556 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:30,560 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:30,561 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:30,561 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:30,561 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:36:30,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34dee2df{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:30,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fc66dd1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:30,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c032901{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-43101-hadoop-hdfs-3_4_1-tests_jar-_-any-11837635761330773247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:30,658 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@587e9393{HTTP/1.1, (http/1.1)}{localhost:43101} 2024-11-22T03:36:30,658 INFO [Time-limited test {}] server.Server(415): Started @110075ms 2024-11-22T03:36:30,659 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:31,922 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data1/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:31,922 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data2/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:31,942 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:31,944 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35c2cef4c2a3772 with lease ID 0x4da318c3fd82aa98: Processing first storage report for DS-798cc6a3-858c-4921-ace9-e9c509aa79d3 from datanode DatanodeRegistration(127.0.0.1:46063, datanodeUuid=da96f6bb-c5d1-42f9-b81d-70e565f7d334, infoPort=35377, infoSecurePort=0, ipcPort=35355, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:31,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35c2cef4c2a3772 with lease ID 0x4da318c3fd82aa98: from storage DS-798cc6a3-858c-4921-ace9-e9c509aa79d3 node DatanodeRegistration(127.0.0.1:46063, datanodeUuid=da96f6bb-c5d1-42f9-b81d-70e565f7d334, infoPort=35377, infoSecurePort=0, ipcPort=35355, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:31,944 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35c2cef4c2a3772 with lease ID 0x4da318c3fd82aa98: Processing first storage report for DS-e58857a9-e4ab-4efc-8ce5-02972790446d from datanode DatanodeRegistration(127.0.0.1:46063, datanodeUuid=da96f6bb-c5d1-42f9-b81d-70e565f7d334, infoPort=35377, infoSecurePort=0, ipcPort=35355, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:31,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35c2cef4c2a3772 with lease ID 0x4da318c3fd82aa98: from storage DS-e58857a9-e4ab-4efc-8ce5-02972790446d node DatanodeRegistration(127.0.0.1:46063, datanodeUuid=da96f6bb-c5d1-42f9-b81d-70e565f7d334, infoPort=35377, infoSecurePort=0, ipcPort=35355, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:32,196 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data3/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:32,196 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data4/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:32,216 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:32,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cd72d1235b8ffe7 with lease ID 0x4da318c3fd82aa99: Processing first storage report for DS-72b5788c-7e29-42d6-878e-2bd50f6714bb from datanode DatanodeRegistration(127.0.0.1:35503, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=37177, infoSecurePort=0, ipcPort=37089, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:32,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cd72d1235b8ffe7 with lease ID 0x4da318c3fd82aa99: from storage DS-72b5788c-7e29-42d6-878e-2bd50f6714bb node DatanodeRegistration(127.0.0.1:35503, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=37177, infoSecurePort=0, ipcPort=37089, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:32,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cd72d1235b8ffe7 with lease ID 0x4da318c3fd82aa99: Processing first storage report for DS-134a0a23-f9c0-4c88-b7ea-2ac2f7297d0d from datanode DatanodeRegistration(127.0.0.1:35503, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=37177, infoSecurePort=0, ipcPort=37089, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:32,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cd72d1235b8ffe7 with lease ID 0x4da318c3fd82aa99: from storage DS-134a0a23-f9c0-4c88-b7ea-2ac2f7297d0d node DatanodeRegistration(127.0.0.1:35503, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=37177, infoSecurePort=0, ipcPort=37089, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:32,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f 2024-11-22T03:36:32,301 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/zookeeper_0, clientPort=57543, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:36:32,302 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57543 2024-11-22T03:36:32,302 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,304 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:36:32,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:36:32,317 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce with version=8 2024-11-22T03:36:32,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:36:32,320 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:36:32,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:32,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:32,320 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:36:32,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:32,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:36:32,320 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:36:32,321 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:36:32,321 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42831 2024-11-22T03:36:32,323 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42831 connecting to ZooKeeper ensemble=127.0.0.1:57543 2024-11-22T03:36:32,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428310x0, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:36:32,384 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42831-0x101609d56d70000 connected 2024-11-22T03:36:32,463 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,465 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,467 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:32,468 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce, hbase.cluster.distributed=false 2024-11-22T03:36:32,470 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:36:32,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42831 2024-11-22T03:36:32,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42831 2024-11-22T03:36:32,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42831 2024-11-22T03:36:32,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42831 2024-11-22T03:36:32,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42831 2024-11-22T03:36:32,490 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:36:32,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:32,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:32,490 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:36:32,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:32,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:36:32,491 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:36:32,491 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:36:32,491 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39625 2024-11-22T03:36:32,493 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39625 connecting to ZooKeeper ensemble=127.0.0.1:57543 2024-11-22T03:36:32,493 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396250x0, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:36:32,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:396250x0, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:36:32,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39625-0x101609d56d70001 connected 2024-11-22T03:36:32,506 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:36:32,507 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:36:32,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:36:32,509 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:36:32,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39625 2024-11-22T03:36:32,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39625 2024-11-22T03:36:32,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39625 2024-11-22T03:36:32,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39625 2024-11-22T03:36:32,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39625 2024-11-22T03:36:32,522 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:42831 2024-11-22T03:36:32,523 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:32,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:32,534 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:36:32,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:36:32,549 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,42831,1732246592320 from backup master directory 2024-11-22T03:36:32,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:32,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:36:32,558 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
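The sequence above, adding a backup-masters znode and then deleting it while /hbase/master is created, is the active-master registration handshake. The sketch below illustrates the underlying ephemeral-znode pattern with the raw ZooKeeper API; the znode paths come from the log, but the helper method and its error handling are a simplified illustration, not HBase's ActiveMasterManager implementation.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ActiveMasterPatternSketch {
  // Attempt to become the active master by creating an ephemeral
  // /hbase/master znode; if another process already holds it, stay a backup.
  static boolean tryBecomeActive(ZooKeeper zk, String serverName)
      throws KeeperException, InterruptedException {
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
    try {
      zk.create("/hbase/master", data,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Won the race: drop our entry from the backup master directory,
      // as in "Deleting ZNode for .../backup-masters/..." above.
      zk.delete("/hbase/backup-masters/" + serverName, -1);
      return true;
    } catch (KeeperException.NodeExistsException e) {
      // Someone else is active; leave a watch on /hbase/master so a
      // NodeDeleted event tells us when to retry.
      zk.exists("/hbase/master", true);
      return false;
    }
  }
}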
2024-11-22T03:36:32,558 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,564 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/hbase.id] with ID: 3c093924-f944-456b-8a02-53a71d0b644f 2024-11-22T03:36:32,564 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/.tmp/hbase.id 2024-11-22T03:36:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:36:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:36:32,572 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/.tmp/hbase.id]:[hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/hbase.id] 2024-11-22T03:36:32,586 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:32,586 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:36:32,587 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
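FSUtils first writes the new cluster ID to .tmp/hbase.id and then moves it onto hbase.id, so a reader can never observe a half-written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API; the file names mirror the log, while the helper method and its parameters are illustrative.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  // Write a freshly generated cluster ID under rootDir as hbase.id.
  static void writeClusterId(Configuration conf, Path rootDir) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    String clusterId = UUID.randomUUID().toString();

    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Stage the content in a temporary file first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // ... then publish it with a single rename.
    if (!fs.rename(tmp, target)) {
      throw new IOException("Failed to rename " + tmp + " to " + target);
    }
  }
}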
2024-11-22T03:36:32,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:36:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:36:32,607 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:36:32,608 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:36:32,608 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:32,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:36:32,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:36:32,618 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store 2024-11-22T03:36:32,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:36:32,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:36:32,627 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:32,627 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:36:32,627 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:32,627 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:32,627 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:36:32,627 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:36:32,627 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
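The master:store descriptor created above carries four column families (info, proc, rs, state), with 'info' using 3 versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks, and the other three on defaults. A minimal sketch of assembling an equivalent descriptor with the public HBase client builder API; the builder calls are my assumption of that API, not the internal MasterRegion code path.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        // 'info': the non-default attributes listed in the log.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs', 'state': defaults (1 version, ROW bloom, 64 KB blocks).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}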
2024-11-22T03:36:32,627 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246592627Disabling compacts and flushes for region at 1732246592627Disabling writes for close at 1732246592627Writing region close event to WAL at 1732246592627Closed at 1732246592627 2024-11-22T03:36:32,628 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/.initializing 2024-11-22T03:36:32,628 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,631 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C42831%2C1732246592320, suffix=, logDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320, archiveDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/oldWALs, maxLogs=10 2024-11-22T03:36:32,632 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C42831%2C1732246592320.1732246592632 2024-11-22T03:36:32,638 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 2024-11-22T03:36:32,640 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35377:35377),(127.0.0.1/127.0.0.1:37177:37177)] 2024-11-22T03:36:32,641 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:36:32,641 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:32,641 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,641 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:36:32,645 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:32,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,647 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:36:32,647 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:32,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:36:32,649 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:32,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:36:32,652 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:32,652 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,654 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,654 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,656 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,656 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,657 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:36:32,658 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:36:32,661 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:36:32,661 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862634, jitterRate=0.09689652919769287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:36:32,662 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246592642Initializing all the Stores at 1732246592643 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246592643Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246592643Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246592643Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246592643Cleaning up temporary data from old regions at 1732246592656 (+13 ms)Region opened successfully at 1732246592662 (+6 ms) 2024-11-22T03:36:32,662 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:36:32,666 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@749295d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:36:32,667 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:36:32,667 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:36:32,667 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:36:32,667 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:36:32,668 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:36:32,668 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:36:32,668 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:36:32,671 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:36:32,672 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:36:32,684 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:36:32,684 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:36:32,685 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:36:32,694 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:36:32,695 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:36:32,696 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:36:32,705 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:36:32,706 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:36:32,716 DEBUG 
[master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:36:32,718 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:36:32,726 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:36:32,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:32,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:36:32,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,737 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,42831,1732246592320, sessionid=0x101609d56d70000, setting cluster-up flag (Was=false) 2024-11-22T03:36:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,789 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:36:32,791 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:32,842 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:36:32,843 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,42831,1732246592320 2024-11-22T03:36:32,845 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:36:32,847 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:32,847 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:36:32,847 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:36:32,847 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,42831,1732246592320 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,849 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:36:32,850 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246622851 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:36:32,851 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,851 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:32,852 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:36:32,852 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:36:32,852 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:36:32,852 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:36:32,853 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,853 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:36:32,856 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:36:32,856 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:36:32,857 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246592856,5,FailOnTimeoutGroup] 2024-11-22T03:36:32,857 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246592857,5,FailOnTimeoutGroup] 2024-11-22T03:36:32,857 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,857 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:36:32,857 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,857 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
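
The hbase:meta table descriptor dumped above lists per-family attributes such as DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => 8192 and VERSIONS => '3'. As a point of reference, here is a minimal sketch of how the same attributes could be expressed for an ordinary user table through the public HBase client API; the table name "demo" and the class name are illustrative and not taken from this test run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor build() {
        // Mirror the 'info' family attributes printed in the descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8192 B (8KB)
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .build();
        // "demo" is an illustrative table name; hbase:meta itself is created by the master, not by clients.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();
      }
    }
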
2024-11-22T03:36:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:36:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:36:32,865 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:36:32,865 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce 2024-11-22T03:36:32,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:36:32,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:36:32,889 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:32,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:36:32,894 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:36:32,894 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,894 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:32,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:36:32,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:36:32,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:32,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:36:32,899 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:36:32,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:32,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:36:32,901 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:36:32,901 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:32,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:32,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:36:32,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740 2024-11-22T03:36:32,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740 2024-11-22T03:36:32,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:36:32,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:36:32,905 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
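
The FlushLargeStoresPolicy lines above note that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set, so the per-family flush lower bound falls back to the region's memstore flush size divided by its number of column families: the master local region (flushSize=134217728, four families info/proc/rs/state) lands at 32 MB, matching the logged flushSizeLowerBound=33554432, while hbase:meta (four families info/ns/rep_barrier/table) is reported at 16 MB, which implies a 64 MB flush size for that region in this test. A minimal sketch of that arithmetic, using only values visible in the log (the 64 MB figure for meta is inferred, not logged directly):

    public class FlushLowerBoundSketch {
      // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
      // the region's memstore flush size divided by its number of column families.
      static long perFamilyLowerBound(long memstoreFlushSize, int numFamilies) {
        return memstoreFlushSize / numFamilies;
      }

      public static void main(String[] args) {
        // Master local region: flushSize=134217728 (from the log), families info/proc/rs/state.
        System.out.println(perFamilyLowerBound(134_217_728L, 4)); // 33554432 (32 MB), as logged
        // hbase:meta reports 16 MB for four families, implying a 64 MB flush size (inferred).
        System.out.println(perFamilyLowerBound(67_108_864L, 4));  // 16777216 (16 MB)
      }
    }
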
2024-11-22T03:36:32,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:36:32,908 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:36:32,908 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793100, jitterRate=0.008479133248329163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:36:32,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246592889Initializing all the Stores at 1732246592890 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246592891 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246592892 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246592892Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246592892Cleaning up temporary data from old regions at 1732246592904 (+12 ms)Region opened successfully at 1732246592909 (+5 ms) 2024-11-22T03:36:32,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:36:32,909 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:36:32,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:36:32,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:36:32,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:36:32,910 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:36:32,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246592909Disabling compacts and flushes for region at 1732246592909Disabling writes for close at 
1732246592909Writing region close event to WAL at 1732246592910 (+1 ms)Closed at 1732246592910 2024-11-22T03:36:32,911 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:32,911 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:36:32,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:36:32,913 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(746): ClusterId : 3c093924-f944-456b-8a02-53a71d0b644f 2024-11-22T03:36:32,913 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:36:32,913 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:36:32,914 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:36:32,924 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:36:32,924 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:36:32,938 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:36:32,939 DEBUG [RS:0;b458937b0f5f:39625 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23a87de9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:36:32,956 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:39625 2024-11-22T03:36:32,957 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:36:32,957 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:36:32,957 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:36:32,958 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,42831,1732246592320 with port=39625, startcode=1732246592490 2024-11-22T03:36:32,958 DEBUG [RS:0;b458937b0f5f:39625 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:36:32,960 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50729, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:36:32,961 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42831 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,39625,1732246592490 2024-11-22T03:36:32,961 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42831 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,39625,1732246592490 2024-11-22T03:36:32,963 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce 2024-11-22T03:36:32,963 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33231 2024-11-22T03:36:32,963 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:36:32,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:36:32,976 DEBUG [RS:0;b458937b0f5f:39625 {}] zookeeper.ZKUtil(111): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,39625,1732246592490 2024-11-22T03:36:32,976 WARN [RS:0;b458937b0f5f:39625 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:36:32,976 INFO [RS:0;b458937b0f5f:39625 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:32,977 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490 2024-11-22T03:36:32,977 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,39625,1732246592490] 2024-11-22T03:36:32,980 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:36:32,983 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:36:32,983 INFO [RS:0;b458937b0f5f:39625 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:36:32,983 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
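
The lines above show the regionserver reporting for duty to the master and the ZooKeeper quorum at 127.0.0.1:57543. For context, this is a minimal sketch of how a client could reach a cluster like this one through the standard client API; the quorum host and port are copied from the log, everything else (class name, use of Admin) is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as seen in the log (quorum=127.0.0.1:57543, baseZNode=/hbase).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57543");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Lists the live regionservers, i.e. the servers registered in the
          // "Registering regionserver=..." lines above once startup completes.
          System.out.println(admin.getClusterMetrics().getLiveServerMetrics().keySet());
        }
      }
    }
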
2024-11-22T03:36:32,983 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:36:32,984 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:36:32,985 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,985 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,986 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:32,986 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:36:32,986 DEBUG [RS:0;b458937b0f5f:39625 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:36:32,987 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
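
Each ExecutorService line above declares a named pool with a corePoolSize/maxPoolSize, and the surrounding ScheduledChore lines declare periodic tasks (for example CompactionChecker with period=1000 ms). A minimal JDK-only analogue of those two patterns, purely illustrative and not HBase's own implementation:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolAndChoreSketch {
      public static void main(String[] args) {
        // Analogue of an executor such as RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.allowCoreThreadTimeOut(true); // like "allowCoreThreadTimeOut=true" in the log

        // Analogue of a ScheduledChore such as CompactionChecker, period=1000 ms.
        ScheduledExecutorService chores = new ScheduledThreadPoolExecutor(1);
        chores.scheduleAtFixedRate(
            () -> System.out.println("compaction check"), 0, 1000, TimeUnit.MILLISECONDS);

        openRegionPool.submit(() -> System.out.println("open region task"));
        // In a real server these pools are shut down when the service stops.
      }
    }
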
2024-11-22T03:36:32,987 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,987 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,987 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,987 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:32,987 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,39625,1732246592490-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:36:33,002 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:36:33,002 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,39625,1732246592490-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,003 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,003 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.Replication(171): b458937b0f5f,39625,1732246592490 started 2024-11-22T03:36:33,017 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,017 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,39625,1732246592490, RpcServer on b458937b0f5f/172.17.0.3:39625, sessionid=0x101609d56d70001 2024-11-22T03:36:33,017 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:36:33,017 DEBUG [RS:0;b458937b0f5f:39625 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,39625,1732246592490 2024-11-22T03:36:33,017 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,39625,1732246592490' 2024-11-22T03:36:33,017 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:36:33,018 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:36:33,018 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:36:33,018 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:36:33,018 DEBUG [RS:0;b458937b0f5f:39625 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,39625,1732246592490 2024-11-22T03:36:33,018 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,39625,1732246592490' 2024-11-22T03:36:33,018 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:36:33,019 DEBUG 
[RS:0;b458937b0f5f:39625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:36:33,019 DEBUG [RS:0;b458937b0f5f:39625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:36:33,019 INFO [RS:0;b458937b0f5f:39625 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:36:33,019 INFO [RS:0;b458937b0f5f:39625 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:36:33,065 WARN [b458937b0f5f:42831 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:36:33,122 INFO [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C39625%2C1732246592490, suffix=, logDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490, archiveDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs, maxLogs=32 2024-11-22T03:36:33,124 INFO [RS:0;b458937b0f5f:39625 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.1732246593123 2024-11-22T03:36:33,154 INFO [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 2024-11-22T03:36:33,155 DEBUG [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35377:35377),(127.0.0.1/127.0.0.1:37177:37177)] 2024-11-22T03:36:33,315 DEBUG [b458937b0f5f:42831 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:36:33,316 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,39625,1732246592490 2024-11-22T03:36:33,320 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,39625,1732246592490, state=OPENING 2024-11-22T03:36:33,397 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:36:33,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:33,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:36:33,412 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:36:33,412 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:33,412 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:33,412 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,39625,1732246592490}] 2024-11-22T03:36:33,568 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:36:33,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54357, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:36:33,576 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:36:33,576 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:33,579 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C39625%2C1732246592490.meta, suffix=.meta, logDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490, archiveDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs, maxLogs=32 2024-11-22T03:36:33,580 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta 2024-11-22T03:36:33,587 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta 2024-11-22T03:36:33,590 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35377:35377),(127.0.0.1/127.0.0.1:37177:37177)] 2024-11-22T03:36:33,593 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:36:33,594 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:36:33,594 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:36:33,594 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
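
By this point the master has published the meta location znode (the "/hbase/meta-region-server" updates in the watcher lines above), alongside znodes such as /hbase/rs and /hbase/running. The following is a minimal sketch of inspecting those znodes with the plain ZooKeeper client, assuming the quorum address from the log; it is illustrative only and not how HBase itself reads them.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address as logged: quorum=127.0.0.1:57543, baseZNode=/hbase.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57543", 30_000, event -> { });
        List<String> children = zk.getChildren("/hbase", false); // e.g. rs, running, meta-region-server
        System.out.println(children);
        Stat meta = zk.exists("/hbase/meta-region-server", false);
        System.out.println(meta == null ? "meta location not published yet" : "meta location present");
        zk.close();
      }
    }
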
2024-11-22T03:36:33,594 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:36:33,594 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:33,594 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:36:33,594 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:36:33,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:36:33,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:36:33,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:33,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:33,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:36:33,598 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:36:33,598 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:33,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:33,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:36:33,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:36:33,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:33,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:36:33,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:36:33,601 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:36:33,602 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:33,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
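The store-open entries above walk through the four hbase:meta column families (info, ns, rep_barrier, table), all with ROW_INDEX_V1 encoding, ROWCOL bloom filters and in-memory caching. As a reference point, here is a hedged sketch of how a comparable family could be described through the public ColumnFamilyDescriptorBuilder API; the values are copied from the descriptors echoed later in the region-open journal, not an attempt to redefine hbase:meta itself.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  // Mirrors the 'info' descriptor printed in the open journal:
  // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
  public static ColumnFamilyDescriptor infoLikeFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();
  }
}
```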
2024-11-22T03:36:33,602 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:36:33,603 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740 2024-11-22T03:36:33,604 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740 2024-11-22T03:36:33,606 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:36:33,606 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:36:33,607 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:36:33,608 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:36:33,609 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847940, jitterRate=0.07821200788021088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:36:33,609 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:36:33,610 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246593594Writing region info on filesystem at 1732246593594Initializing all the Stores at 1732246593595 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246593595Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246593596 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246593596Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246593596Cleaning up temporary data from old regions at 1732246593606 (+10 ms)Running coprocessor post-open hooks at 1732246593609 (+3 ms)Region opened successfully at 1732246593610 (+1 ms) 2024-11-22T03:36:33,611 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246593568 2024-11-22T03:36:33,614 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:36:33,614 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:36:33,615 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,39625,1732246592490 2024-11-22T03:36:33,616 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,39625,1732246592490, state=OPEN 2024-11-22T03:36:33,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:36:33,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:36:33,660 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,39625,1732246592490 2024-11-22T03:36:33,660 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:33,660 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:36:33,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:36:33,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,39625,1732246592490 in 248 msec 2024-11-22T03:36:33,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:36:33,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 753 msec 2024-11-22T03:36:33,669 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:36:33,669 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:36:33,670 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:36:33,670 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,39625,1732246592490, seqNum=-1] 2024-11-22T03:36:33,671 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:36:33,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52477, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:36:33,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 831 msec 2024-11-22T03:36:33,679 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246593679, completionTime=-1 2024-11-22T03:36:33,679 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:36:33,679 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:36:33,681 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:36:33,681 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246653681 2024-11-22T03:36:33,681 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246713681 2024-11-22T03:36:33,681 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:36:33,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,42831,1732246592320-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,42831,1732246592320-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,42831,1732246592320-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:42831, period=300000, unit=MILLISECONDS is enabled. 
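At this point the meta location has been published under /hbase/meta-region-server and InitMetaProcedure has created the 'default' and 'hbase' namespaces. From the client side, both facts become observable through the standard API; a minimal sketch, assuming a cluster configuration is available on the classpath.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves the same location the master just published (host,port,startcode).
      System.out.println(meta.getRegionLocation(HConstants.EMPTY_START_ROW));
      // On a fresh cluster, InitMetaProcedure creates exactly these two namespaces.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace=" + ns.getName()); // expect "default" and "hbase"
      }
    }
  }
}
```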
2024-11-22T03:36:33,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,682 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,684 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.128sec 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,42831,1732246592320-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:36:33,686 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,42831,1732246592320-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:36:33,689 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:36:33,689 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:36:33,689 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,42831,1732246592320-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
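Once "Master has completed initialization" appears, cluster-wide state such as the active master, the live region server list and the balancer switch is queryable through Admin. A small sketch using getClusterMetrics(); the printed fields are only examples of what the API exposes.

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master=" + metrics.getMasterName());
      System.out.println("live regionservers=" + metrics.getLiveServerMetrics().size());
      System.out.println("balancer enabled=" + admin.isBalancerEnabled());
    }
  }
}
```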
2024-11-22T03:36:33,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798d3535, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:36:33,713 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,42831,-1 for getting cluster id 2024-11-22T03:36:33,714 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:36:33,715 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3c093924-f944-456b-8a02-53a71d0b644f' 2024-11-22T03:36:33,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:36:33,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3c093924-f944-456b-8a02-53a71d0b644f" 2024-11-22T03:36:33,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f633512, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:36:33,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,42831,-1] 2024-11-22T03:36:33,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:36:33,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:36:33,718 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56082, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:36:33,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e35caec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:36:33,720 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:36:33,721 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,39625,1732246592490, seqNum=-1] 2024-11-22T03:36:33,722 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:36:33,724 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47884, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:36:33,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b458937b0f5f,42831,1732246592320 2024-11-22T03:36:33,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:33,730 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:36:33,752 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:36:33,752 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:36:33,753 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43975 2024-11-22T03:36:33,755 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43975 connecting to ZooKeeper ensemble=127.0.0.1:57543 2024-11-22T03:36:33,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:33,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:36:33,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439750x0, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:36:33,779 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43975-0x101609d56d70002 connected 2024-11-22T03:36:33,779 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-22T03:36:33,779 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-22T03:36:33,780 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:36:33,781 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
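The harness reports "Minicluster is up", flips the balancer off ("set balanceSwitch=false"), and then boots a second region server (RS:1). A hedged sketch of that sequence with HBaseTestingUtil; the method names follow the long-standing testing-utility API, and getMiniHBaseCluster()/startRegionServer() in particular are assumptions about how this specific test adds RS:1, not a verified transcript of it.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Admin;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();                        // master + one regionserver by default
    Admin admin = util.getAdmin();
    admin.balancerSwitch(false, true);              // corresponds to "set balanceSwitch=false"
    // Assumed call chain for adding a second regionserver (RS:1 in the log).
    util.getMiniHBaseCluster().startRegionServer();
    // ... test body would run against the two-RS cluster here ...
    util.shutdownMiniCluster();
  }
}
```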
2024-11-22T03:36:33,782 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:36:33,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:36:33,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43975 2024-11-22T03:36:33,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43975 2024-11-22T03:36:33,788 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43975 2024-11-22T03:36:33,788 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43975 2024-11-22T03:36:33,788 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43975 2024-11-22T03:36:33,790 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(746): ClusterId : 3c093924-f944-456b-8a02-53a71d0b644f 2024-11-22T03:36:33,790 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:36:33,800 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:36:33,800 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:36:33,812 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:36:33,812 DEBUG [RS:1;b458937b0f5f:43975 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d444263, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:36:33,826 DEBUG [RS:1;b458937b0f5f:43975 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b458937b0f5f:43975 2024-11-22T03:36:33,826 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:36:33,826 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:36:33,826 DEBUG [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(832): About to register with Master. 
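The RpcExecutor lines above start each call queue with handlerCount=3 and maxQueueLength=30. Those numbers are configuration-driven; a hedged sketch of the relevant knobs, assuming the standard property names (the call-queue length key below is an assumption; by default the queue length is 10x the handler count, which matches 3 -> 30 here).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The test runs with a deliberately small handler count; production default is larger.
    int handlers = conf.getInt("hbase.regionserver.handler.count", 3);
    // Assumed key name; the default queue length is derived from the handler count (10x).
    int queueLen = conf.getInt("hbase.ipc.server.max.callqueue.length", handlers * 10);
    System.out.println("handlerCount=" + handlers + " maxQueueLength=" + queueLen);
  }
}
```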
2024-11-22T03:36:33,827 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,42831,1732246592320 with port=43975, startcode=1732246593751 2024-11-22T03:36:33,827 DEBUG [RS:1;b458937b0f5f:43975 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:36:33,829 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56137, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:36:33,829 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42831 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,43975,1732246593751 2024-11-22T03:36:33,829 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42831 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,43975,1732246593751 2024-11-22T03:36:33,831 DEBUG [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce 2024-11-22T03:36:33,831 DEBUG [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33231 2024-11-22T03:36:33,831 DEBUG [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:36:33,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:36:33,842 DEBUG [RS:1;b458937b0f5f:43975 {}] zookeeper.ZKUtil(111): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,43975,1732246593751 2024-11-22T03:36:33,842 WARN [RS:1;b458937b0f5f:43975 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:36:33,843 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,43975,1732246593751] 2024-11-22T03:36:33,843 INFO [RS:1;b458937b0f5f:43975 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:36:33,843 DEBUG [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751 2024-11-22T03:36:33,847 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:36:33,849 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:36:33,849 INFO [RS:1;b458937b0f5f:43975 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:36:33,849 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
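The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. Both values are heap-fraction driven; a sketch of how they are typically derived, assuming the stock property names and an on-heap memstore (Offheap=false, as logged).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long maxHeap = Runtime.getRuntime().maxMemory();
    // Global memstore limit as a fraction of heap (default 0.4) -> ~880 MB on a ~2.2 GB heap.
    float upperFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the upper limit (default 0.95) -> 836 MB in the log.
    float lowerOfUpper =
        conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    long globalLimit = (long) (maxHeap * upperFraction);
    long lowMark = (long) (globalLimit * lowerOfUpper);
    System.out.println("globalMemStoreLimit=" + globalLimit + " lowMark=" + lowMark);
  }
}
```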
2024-11-22T03:36:33,849 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:36:33,850 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:36:33,850 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,850 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,850 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,850 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,850 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:36:33,851 DEBUG [RS:1;b458937b0f5f:43975 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:36:33,851 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
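The ExecutorService lines above start one small fixed-size pool per event type (e.g. RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1; RS_FLUSH_OPERATIONS with 3/3). Underneath this is ordinary java.util.concurrent plumbing; the sketch below is an illustrative analogue of that pattern using the JDK directly, not HBase's own ExecutorService wrapper.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class EventTypePoolSketch {
  // One bounded pool per event type, mirroring "corePoolSize=1, maxPoolSize=1" etc.
  static ThreadPoolExecutor newPool(String name, int core, int max) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(core, max, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),                     // unbounded work queue
        r -> new Thread(r, name));                       // named threads, like the log prefixes
    pool.allowCoreThreadTimeOut(true);                   // let idle handlers exit
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor flushOps = newPool("RS_FLUSH_OPERATIONS", 3, 3);
    openRegion.execute(() -> System.out.println("open region task"));
    flushOps.execute(() -> System.out.println("flush task"));
    openRegion.shutdown();
    flushOps.shutdown();
  }
}
```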
2024-11-22T03:36:33,851 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,852 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,852 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,852 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,852 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,43975,1732246593751-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:36:33,865 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:36:33,865 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,43975,1732246593751-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,865 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,865 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.Replication(171): b458937b0f5f,43975,1732246593751 started 2024-11-22T03:36:33,877 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:36:33,877 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,43975,1732246593751, RpcServer on b458937b0f5f/172.17.0.3:43975, sessionid=0x101609d56d70002 2024-11-22T03:36:33,877 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:36:33,877 DEBUG [RS:1;b458937b0f5f:43975 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,43975,1732246593751 2024-11-22T03:36:33,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;b458937b0f5f:43975,5,FailOnTimeoutGroup] 2024-11-22T03:36:33,877 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,43975,1732246593751' 2024-11-22T03:36:33,877 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:36:33,878 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-22T03:36:33,878 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:36:33,878 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:36:33,879 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:36:33,879 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:36:33,879 DEBUG [RS:1;b458937b0f5f:43975 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
b458937b0f5f,43975,1732246593751 2024-11-22T03:36:33,879 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,43975,1732246593751' 2024-11-22T03:36:33,879 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:36:33,879 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:36:33,879 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is b458937b0f5f,42831,1732246592320 2024-11-22T03:36:33,879 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@60c9ef5b 2024-11-22T03:36:33,879 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:36:33,880 DEBUG [RS:1;b458937b0f5f:43975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:36:33,880 INFO [RS:1;b458937b0f5f:43975 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:36:33,880 INFO [RS:1;b458937b0f5f:43975 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:36:33,881 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56094, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:36:33,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:36:33,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
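The two TableDescriptorChecker warnings show the test shrinking MAX_FILESIZE to 786432 bytes and MEMSTORE_FLUSHSIZE to 8192 bytes so that flushes and splits happen quickly during log rolling. A hedged sketch of a descriptor carrying those values through the public builder API; the table name is taken from the log, and such small values trigger exactly these warnings unless the sanity checks are relaxed.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallLimitsDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
        .build();
  }
}
```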
2024-11-22T03:36:33,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:36:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:36:33,885 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:36:33,885 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:33,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-22T03:36:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:36:33,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:36:33,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741835_1011 (size=393) 2024-11-22T03:36:33,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741835_1011 (size=393) 2024-11-22T03:36:33,896 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aa337c81b1b655cad878320c92b169ba, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce 2024-11-22T03:36:33,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741836_1012 (size=76) 2024-11-22T03:36:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741836_1012 (size=76) 2024-11-22T03:36:33,905 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:33,905 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing aa337c81b1b655cad878320c92b169ba, disabling compactions & flushes 2024-11-22T03:36:33,905 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:33,905 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:33,905 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. after waiting 0 ms 2024-11-22T03:36:33,905 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:33,905 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:33,905 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa337c81b1b655cad878320c92b169ba: Waiting for close lock at 1732246593905Disabling compacts and flushes for region at 1732246593905Disabling writes for close at 1732246593905Writing region close event to WAL at 1732246593905Closed at 1732246593905 2024-11-22T03:36:33,908 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:36:33,908 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732246593908"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246593908"}]},"ts":"1732246593908"} 2024-11-22T03:36:33,912 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
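CREATE_TABLE_ADD_TO_META writes a row into hbase:meta keyed by the region name, carrying info:regioninfo and info:state cells, as the MetaTableAccessor Put above shows. Those catalog rows can be inspected with an ordinary client scan; a minimal sketch, assuming read access to hbase:meta and using the table-name prefix as the scan key.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowSketch {
  public static void main(String[] args) throws Exception {
    byte[] prefix = Bytes.toBytes("TestLogRolling-testLogRollOnDatanodeDeath,");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(
             new Scan().setRowPrefixFilter(prefix).addFamily(HConstants.CATALOG_FAMILY))) {
      for (Result r : scanner) {
        // Each row holds info:regioninfo and info:state, as in the Put logged above.
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    }
  }
}
```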
2024-11-22T03:36:33,913 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:36:33,914 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246593913"}]},"ts":"1732246593913"} 2024-11-22T03:36:33,916 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-22T03:36:33,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=aa337c81b1b655cad878320c92b169ba, ASSIGN}] 2024-11-22T03:36:33,918 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=aa337c81b1b655cad878320c92b169ba, ASSIGN 2024-11-22T03:36:33,919 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=aa337c81b1b655cad878320c92b169ba, ASSIGN; state=OFFLINE, location=b458937b0f5f,39625,1732246592490; forceNewPlan=false, retain=false 2024-11-22T03:36:33,982 INFO [RS:1;b458937b0f5f:43975 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C43975%2C1732246593751, suffix=, logDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751, archiveDir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs, maxLogs=32 2024-11-22T03:36:33,983 INFO [RS:1;b458937b0f5f:43975 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C43975%2C1732246593751.1732246593983 2024-11-22T03:36:33,993 INFO [RS:1;b458937b0f5f:43975 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 2024-11-22T03:36:33,994 DEBUG [RS:1;b458937b0f5f:43975 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37177:37177),(127.0.0.1/127.0.0.1:35377:35377)] 2024-11-22T03:36:34,071 INFO [b458937b0f5f:42831 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
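The ASSIGN subprocedure created above is what the client's table creation ultimately waits on: the caller keeps asking "is procedure done pid=4" (visible again near the end of this section) until the region is OPEN. A hedged client-side sketch of issuing the create and confirming availability; only the table name is taken from the log, everything else is illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateAndWaitSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the CreateTableProcedure (pid=4 in the log) reaches SUCCESS.
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
      System.out.println("available=" + admin.isTableAvailable(name));
    }
  }
}
```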
2024-11-22T03:36:34,072 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa337c81b1b655cad878320c92b169ba, regionState=OPENING, regionLocation=b458937b0f5f,39625,1732246592490 2024-11-22T03:36:34,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=aa337c81b1b655cad878320c92b169ba, ASSIGN because future has completed 2024-11-22T03:36:34,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa337c81b1b655cad878320c92b169ba, server=b458937b0f5f,39625,1732246592490}] 2024-11-22T03:36:34,237 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:34,238 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aa337c81b1b655cad878320c92b169ba, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:36:34,238 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,239 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:36:34,239 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,239 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,241 INFO [StoreOpener-aa337c81b1b655cad878320c92b169ba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,242 INFO [StoreOpener-aa337c81b1b655cad878320c92b169ba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa337c81b1b655cad878320c92b169ba columnFamilyName info 2024-11-22T03:36:34,242 DEBUG [StoreOpener-aa337c81b1b655cad878320c92b169ba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:36:34,243 INFO [StoreOpener-aa337c81b1b655cad878320c92b169ba-1 {}] regionserver.HStore(327): Store=aa337c81b1b655cad878320c92b169ba/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:36:34,243 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,244 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,244 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,245 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,245 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,247 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,249 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:36:34,249 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aa337c81b1b655cad878320c92b169ba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780610, jitterRate=-0.007403731346130371}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:36:34,249 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:34,250 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aa337c81b1b655cad878320c92b169ba: Running coprocessor pre-open hook at 1732246594239Writing region info on filesystem at 1732246594239Initializing all the Stores at 1732246594240 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246594240Cleaning up temporary data from old regions at 1732246594245 (+5 ms)Running coprocessor post-open hooks at 1732246594249 (+4 ms)Region opened successfully at 1732246594249 2024-11-22T03:36:34,251 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba., pid=6, masterSystemTime=1732246594233 2024-11-22T03:36:34,253 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:34,253 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:34,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa337c81b1b655cad878320c92b169ba, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,39625,1732246592490 2024-11-22T03:36:34,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa337c81b1b655cad878320c92b169ba, server=b458937b0f5f,39625,1732246592490 because future has completed 2024-11-22T03:36:34,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:36:34,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aa337c81b1b655cad878320c92b169ba, server=b458937b0f5f,39625,1732246592490 in 180 msec 2024-11-22T03:36:34,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:36:34,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=aa337c81b1b655cad878320c92b169ba, ASSIGN in 345 msec 2024-11-22T03:36:34,265 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:36:34,265 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246594265"}]},"ts":"1732246594265"} 2024-11-22T03:36:34,268 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-22T03:36:34,270 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:36:34,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 388 msec 2024-11-22T03:36:37,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:36:37,892 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:36:37,894 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:36:37,895 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-22T03:36:37,896 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:36:37,897 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:36:37,897 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:36:37,897 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T03:36:39,102 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:36:39,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:39,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:39,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:39,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:36:39,138 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-22T03:36:43,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42831 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:36:43,954 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-22T03:36:43,954 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-22T03:36:43,959 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:36:43,959 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:43,981 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:43,987 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:43,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:43,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:43,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:36:43,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@746d5659{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:43,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@668b2d84{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:44,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@106410c2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-39661-hadoop-hdfs-3_4_1-tests_jar-_-any-9077487696952273581/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:44,115 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@714a0d6a{HTTP/1.1, (http/1.1)}{localhost:39661} 2024-11-22T03:36:44,115 INFO [Time-limited test {}] server.Server(415): Started @123532ms 2024-11-22T03:36:44,117 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:44,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:44,171 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:44,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:44,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:44,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:36:44,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46c36b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:44,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fa42555{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:44,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35a68dc4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-33039-hadoop-hdfs-3_4_1-tests_jar-_-any-2374013865824932836/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:44,287 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fad0376{HTTP/1.1, (http/1.1)}{localhost:33039} 2024-11-22T03:36:44,287 INFO [Time-limited test {}] server.Server(415): Started @123705ms 2024-11-22T03:36:44,288 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:44,330 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:44,334 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:44,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:44,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:44,335 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:36:44,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@90741e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:44,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10f48a0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:44,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78e0bf7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-35871-hadoop-hdfs-3_4_1-tests_jar-_-any-4232705234803737959/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:44,439 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@232ae44d{HTTP/1.1, (http/1.1)}{localhost:35871} 2024-11-22T03:36:44,439 INFO [Time-limited test {}] server.Server(415): Started @123857ms 2024-11-22T03:36:44,440 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:45,382 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:45,382 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:45,413 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:45,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14415688583fc705 with lease ID 0x4da318c3fd82aa9a: Processing first storage report for DS-f73ca267-7c11-49d8-b21d-fb4edb69881d from datanode DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:45,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14415688583fc705 with lease ID 0x4da318c3fd82aa9a: from storage DS-f73ca267-7c11-49d8-b21d-fb4edb69881d node DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:45,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14415688583fc705 with lease ID 0x4da318c3fd82aa9a: Processing first storage report for DS-a1db03f7-8eae-48ca-9245-f5d7f3ca9d74 from datanode DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:45,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14415688583fc705 with lease ID 0x4da318c3fd82aa9a: from storage DS-a1db03f7-8eae-48ca-9245-f5d7f3ca9d74 node DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:36:45,614 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data7/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:45,614 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data8/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:45,636 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:45,639 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c99adb333162f06 with lease ID 0x4da318c3fd82aa9b: Processing first storage report for DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20 from datanode DatanodeRegistration(127.0.0.1:38489, datanodeUuid=de353a9d-f890-4e48-bfa2-6b73e3d39066, infoPort=46029, infoSecurePort=0, ipcPort=43461, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:45,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c99adb333162f06 with lease ID 0x4da318c3fd82aa9b: from storage DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20 node DatanodeRegistration(127.0.0.1:38489, datanodeUuid=de353a9d-f890-4e48-bfa2-6b73e3d39066, infoPort=46029, infoSecurePort=0, ipcPort=43461, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:45,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c99adb333162f06 with lease ID 0x4da318c3fd82aa9b: Processing first storage report for DS-b4759591-da13-4f14-bc58-cb94d07d0304 from datanode DatanodeRegistration(127.0.0.1:38489, datanodeUuid=de353a9d-f890-4e48-bfa2-6b73e3d39066, infoPort=46029, infoSecurePort=0, ipcPort=43461, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:45,640 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c99adb333162f06 with lease ID 0x4da318c3fd82aa9b: from storage DS-b4759591-da13-4f14-bc58-cb94d07d0304 node DatanodeRegistration(127.0.0.1:38489, datanodeUuid=de353a9d-f890-4e48-bfa2-6b73e3d39066, infoPort=46029, infoSecurePort=0, ipcPort=43461, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:45,746 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data10/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:45,746 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data9/current/BP-300744400-172.17.0.3-1732246589693/current, will proceed with Du for space computation calculation, 2024-11-22T03:36:45,767 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:36:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad4f2bcc832239e3 with lease ID 0x4da318c3fd82aa9c: Processing first storage report for DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9 from datanode DatanodeRegistration(127.0.0.1:40895, datanodeUuid=890419be-9dc1-4923-9d45-510c49d526e9, infoPort=39737, infoSecurePort=0, ipcPort=37037, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad4f2bcc832239e3 with lease ID 0x4da318c3fd82aa9c: from storage DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9 node DatanodeRegistration(127.0.0.1:40895, datanodeUuid=890419be-9dc1-4923-9d45-510c49d526e9, infoPort=39737, infoSecurePort=0, ipcPort=37037, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad4f2bcc832239e3 with lease ID 0x4da318c3fd82aa9c: Processing first storage report for DS-e2136771-994b-427f-8e19-36cedb7c8a0a from datanode DatanodeRegistration(127.0.0.1:40895, datanodeUuid=890419be-9dc1-4923-9d45-510c49d526e9, infoPort=39737, infoSecurePort=0, ipcPort=37037, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693) 2024-11-22T03:36:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad4f2bcc832239e3 with lease ID 0x4da318c3fd82aa9c: from storage DS-e2136771-994b-427f-8e19-36cedb7c8a0a node DatanodeRegistration(127.0.0.1:40895, datanodeUuid=890419be-9dc1-4923-9d45-510c49d526e9, infoPort=39737, infoSecurePort=0, ipcPort=37037, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:45,774 WARN [ResponseProcessor for block BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,780 WARN [ResponseProcessor for block BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
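The store descriptor logged above for column family 'info' (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', no compression or data-block encoding) and the completed CreateTableProcedure correspond to a table that could be built with the standard HBase client API roughly as sketched below. This is an illustrative sketch only, not the test's actual setup code; the Connection/Admin plumbing and the class name are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: create a table equivalent to the descriptor shown in the log above.
    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
          TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)               // BLOCKSIZE => '65536'
                  .build())
              .build();
          // Returns once the CreateTableProcedure (pid=4 above) reaches SUCCESS.
          admin.createTable(td);
        }
      }
    }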
2024-11-22T03:36:45,780 WARN [ResponseProcessor for block BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,780 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 block BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:45,781 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 block BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:45,781 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 block BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:45,781 WARN [PacketResponder: BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35503] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,781 WARN [PacketResponder: BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35503] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1128785763_22 at /127.0.0.1:43450 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43450 dst: /127.0.0.1:35503 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,782 WARN [ResponseProcessor for block BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,782 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta block BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:45,782 WARN [PacketResponder: BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35503] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_952184402_22 at /127.0.0.1:59990 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59990 dst: /127.0.0.1:46063 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:43414 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43414 dst: /127.0.0.1:35503 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:43418 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43418 dst: /127.0.0.1:35503 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,784 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c032901{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:45,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1128785763_22 at /127.0.0.1:60078 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60078 dst: /127.0.0.1:46063 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_952184402_22 at /127.0.0.1:43374 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43374 dst: /127.0.0.1:35503 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:60038 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60038 dst: /127.0.0.1:46063 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,784 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@587e9393{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:45,784 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:45,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:60032 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60032 dst: /127.0.0.1:46063 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
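The "Bad response ERROR" / EOFException records and the stopped datanode web contexts above are what HDFS write pipelines report when one of their datanodes goes away mid-write. As a hedged illustration only (the test's own source is not part of this log), stopping a datanode in a MiniDFSCluster-based test looks roughly like the sketch below; the variable names and the datanode index are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch only: take one datanode away while writers hold open streams. Writers whose
    // pipeline includes that node then see EOF / "Bad response ERROR" and begin recovery,
    // as in the records above.
    public class StopDataNodeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        // ... start writers against the cluster, then stop one datanode:
        cluster.stopDataNode(0);
        // ... assert on how the writers recover (or fail), then tear down:
        cluster.shutdown();
      }
    }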
2024-11-22T03:36:45,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fc66dd1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:45,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34dee2df{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:45,789 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:36:45,789 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:45,789 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:45,789 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-300744400-172.17.0.3-1732246589693 (Datanode Uuid c941676d-a41a-4732-8e59-96d141820b93) service to localhost/127.0.0.1:33231 2024-11-22T03:36:45,790 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data3/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:45,790 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data4/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:45,790 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:45,791 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 block BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] 
at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,792 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta block BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,793 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 block BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,793 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@55b66cdb {}] datanode.DataXceiver(331): 127.0.0.1:46063:DataXceiver error processing unknown operation src: /127.0.0.1:34028 dst: /127.0.0.1:46063 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:45,794 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 block BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
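The setupPipelineForAppendOrRecovery / createBlockOutputStream failures above come from the client trying to rebuild the write pipeline after marking a datanode bad. Whether recovery asks for a replacement datanode or carries on with the survivors is governed by client-side configuration; the sketch below shows the relevant keys, with illustrative values that are not necessarily what this test uses.

    import org.apache.hadoop.conf.Configuration;

    // Sketch: client keys controlling datanode replacement during pipeline recovery.
    public class PipelineRecoveryConfSketch {
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Try to add a replacement datanode when one in the pipeline fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT / ALWAYS / NEVER: when a replacement should be requested.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If adding a replacement fails, keep writing to the remaining datanodes
        // instead of aborting the stream.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }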
2024-11-22T03:36:45,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31ec175a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:45,809 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a37e627{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:45,810 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:45,857 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@720bc7eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:45,857 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cadc2ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:45,859 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:45,859 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
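The shutdown sequence above (Jetty contexts stopping, the block pool service ending, and the subsequent "All datanodes ... are bad" aborts) is the expected fallout of the test stopping a datanode while WAL streams are still open. As a rough sketch only, assuming the hadoop-hdfs test artifact and a hypothetical in-process cluster (none of the names below come from this log), datanode death is usually simulated like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch: start a small in-process HDFS cluster, then stop one datanode so
    // that any client still writing has to rebuild its pipeline, which is the
    // situation the DataStreamer warnings above describe.
    public class DatanodeDeathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          cluster.stopDataNode(0); // open writers lose this replica and must recover
        } finally {
          cluster.shutdown();
        }
      }
    }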
2024-11-22T03:36:45,859 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-300744400-172.17.0.3-1732246589693 (Datanode Uuid da96f6bb-c5d1-42f9-b81d-70e565f7d334) service to localhost/127.0.0.1:33231 2024-11-22T03:36:45,859 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:45,860 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data1/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:45,860 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data2/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:45,860 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:45,865 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba., hostname=b458937b0f5f,39625,1732246592490, seqNum=2] 2024-11-22T03:36:45,867 ERROR [FSHLog-0-hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce-prefix:b458937b0f5f,39625,1732246592490 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,867 WARN [FSHLog-0-hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce-prefix:b458937b0f5f,39625,1732246592490 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,867 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C39625%2C1732246592490:(num 1732246593123) roll requested 2024-11-22T03:36:45,868 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.1732246605867 2024-11-22T03:36:45,885 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:45,885 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:45,886 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:45,886 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:45,886 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:45,886 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246605867 2024-11-22T03:36:45,886 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:45,886 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
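The "roll requested" / "Rolled WAL" pair above is the same transition a client can force explicitly, which can be handy when reproducing this failure outside the test. A minimal sketch, assuming a reachable cluster and a placeholder region server name (neither taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: ask a region server to close its current WAL file and open a new
    // one, the same operation the log roller performs above.
    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ServerName rs = ServerName.valueOf("rs-host.example", 16020, 1L); // placeholder
          admin.rollWALWriter(rs);
        }
      }
    }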
2024-11-22T03:36:45,887 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-22T03:36:45,888 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-22T03:36:45,888 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 2024-11-22T03:36:45,891 WARN [IPC Server handler 4 on default port 33231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-22T03:36:45,896 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 after 5ms 2024-11-22T03:36:45,900 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46029:46029),(127.0.0.1/127.0.0.1:39737:39737)] 2024-11-22T03:36:45,900 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:46,610 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:47,858 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
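The RecoverLeaseFSUtils entries above keep asking the NameNode to release the lease on the abandoned WAL until the file is closed; attempt=0 fails because block recovery has only just started. The underlying pattern against plain HDFS, sketched with a placeholder NameNode URI and file path (not the ones in this log):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://namenode.example:8020"), conf);
        Path wal = new Path("/example/WALs/stale-wal-file"); // placeholder
        // recoverLease() returns true once the file is closed; otherwise the
        // NameNode starts (or continues) recovery and the caller retries after a
        // pause, which is the attempt=0, attempt=1, ... loop seen above.
        while (!dfs.recoverLease(wal) && !dfs.isFileClosed(wal)) {
          Thread.sleep(4000L);
        }
      }
    }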
2024-11-22T03:36:47,901 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:47,902 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246605867 2024-11-22T03:36:47,903 WARN [ResponseProcessor for block BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:47,903 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246605867 block BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:47,904 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:39676 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:40895:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39676 dst: /127.0.0.1:40895 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:47,904 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:54284 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38489:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54284 dst: /127.0.0.1:38489 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:47,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35a68dc4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:47,938 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fad0376{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:47,938 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:47,938 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fa42555{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:47,938 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46c36b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:47,940 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:47,940 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:36:47,940 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-300744400-172.17.0.3-1732246589693 (Datanode Uuid de353a9d-f890-4e48-bfa2-6b73e3d39066) service to localhost/127.0.0.1:33231 2024-11-22T03:36:47,940 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:47,940 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data7/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:47,941 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data8/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:47,941 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:48,610 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:49,858 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:49,897 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 after 4009ms 2024-11-22T03:36:49,901 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:49,901 WARN [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]] 2024-11-22T03:36:49,902 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C39625%2C1732246592490:(num 1732246605867) roll requested 2024-11-22T03:36:49,902 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.1732246609902 2024-11-22T03:36:49,905 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:49,905 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:49,905 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741839_1021 2024-11-22T03:36:49,908 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:49,912 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:49,913 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 
2024-11-22T03:36:49,913 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741840_1022 2024-11-22T03:36:49,913 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:49,929 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:49,929 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:49,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:49,930 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:49,930 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:49,930 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246605867 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246609902 2024-11-22T03:36:49,931 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:39737:39737)] 2024-11-22T03:36:49,931 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:49,931 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246605867 is not closed yet, will try archiving it next time 2024-11-22T03:36:49,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40895 is added to blk_1073741838_1020 (size=2431) 2024-11-22T03:36:49,946 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:36:50,333 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:50,610 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
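The abandon/exclude cycle above (each failed createBlockOutputStream marks one datanode bad and forces the block to be re-allocated elsewhere) is governed by the DFS client's replace-datanode-on-failure settings. As an illustration only, and not necessarily the configuration this test runs with, the relevant client keys look like this:

    import org.apache.hadoop.conf.Configuration;

    // Sketch: standard client-side knobs controlling whether a writer tries to
    // replace a failed datanode in its pipeline. Values are illustrative.
    public class PipelineRecoveryConfSketch {
      public static Configuration build() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // best-effort lets the writer continue with fewer replicas when no
        // replacement can be found, rather than aborting the stream.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }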
2024-11-22T03:36:51,783 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74561d28[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40895, datanodeUuid=890419be-9dc1-4923-9d45-510c49d526e9, infoPort=39737, infoSecurePort=0, ipcPort=37037, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741838_1020 to 127.0.0.1:46063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:51,859 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:51,931 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:51,950 WARN [ResponseProcessor for block BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
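By this point only a few datanodes are still alive, which is why re-replicating blk_1073741838_1020 to 127.0.0.1:46063 fails and why the WAL rollers keep reporting "All datanodes ... are bad". A short sketch, with a placeholder NameNode URI, of listing the datanodes the NameNode still considers live in order to confirm that:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    // Sketch: print the transfer address of each live datanode.
    public class LiveDatanodesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://namenode.example:8020"), conf);
        for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
          System.out.println(dn.getXferAddr());
        }
      }
    }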
2024-11-22T03:36:51,950 WARN [DataStreamer for file /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246609902 block BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:51,950 WARN [PacketResponder: BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40895] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:51,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:32772 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32772 dst: /127.0.0.1:33343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:51,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:39704 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:40895:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39704 dst: /127.0.0.1:40895 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:51,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78e0bf7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:52,000 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@232ae44d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:36:52,000 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:36:52,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10f48a0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:36:52,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@90741e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:36:52,002 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:36:52,002 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:36:52,002 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-300744400-172.17.0.3-1732246589693 (Datanode Uuid 890419be-9dc1-4923-9d45-510c49d526e9) service to localhost/127.0.0.1:33231 2024-11-22T03:36:52,002 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:36:52,003 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data9/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:52,003 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data10/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:36:52,004 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:36:52,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] regionserver.HRegion(8855): Flush requested on aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:52,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:36:52,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/6bd94e484c2a440bb61c2773d9d663a9 is 1080, key is row0002/info:/1732246607943/Put/seqid=0 2024-11-22T03:36:52,039 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,039 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:52,039 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741842_1025 2024-11-22T03:36:52,040 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:52,041 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,041 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 
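The flush above was triggered internally once the memstore for aa337c81b1b655cad878320c92b169ba grew past its threshold; the HFileWriterImpl line shows the .tmp HFile being written while the block-placement failures continue. The same flush can be requested from a client; a minimal sketch, assuming a reachable cluster (the table name is the one used in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: force every memstore of the table to be written out as HFiles,
    // the same work MemStoreFlusher.0 is doing above.
    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
        }
      }
    }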
2024-11-22T03:36:52,041 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741843_1026 2024-11-22T03:36:52,042 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:52,044 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,044 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:52,044 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741844_1027 2024-11-22T03:36:52,045 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:52,048 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40895 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:36:52,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38180 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741845_1028 to mirror 127.0.0.1:40895 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:52,048 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:52,048 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741845_1028 2024-11-22T03:36:52,048 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38180 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:52,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38180 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38180 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:52,049 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:52,050 WARN [IPC Server handler 2 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:52,050 WARN [IPC Server handler 2 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:52,050 WARN [IPC Server handler 2 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:52,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741846_1029 (size=10347) 2024-11-22T03:36:52,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/6bd94e484c2a440bb61c2773d9d663a9 2024-11-22T03:36:52,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/6bd94e484c2a440bb61c2773d9d663a9 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/6bd94e484c2a440bb61c2773d9d663a9 2024-11-22T03:36:52,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/6bd94e484c2a440bb61c2773d9d663a9, entries=5, sequenceid=11, filesize=10.1 K 2024-11-22T03:36:52,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for aa337c81b1b655cad878320c92b169ba in 457ms, sequenceid=11, compaction requested=false 2024-11-22T03:36:52,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:52,611 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] regionserver.HRegion(8855): Flush requested on aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:52,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-22T03:36:52,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/dd270ab1ebf947f5adde1a48bde48653 is 1080, key is row0007/info:/1732246612017/Put/seqid=0 2024-11-22T03:36:52,655 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:36:52,655 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:52,655 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741847_1030 2024-11-22T03:36:52,656 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:52,657 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,658 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:52,658 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741848_1031 2024-11-22T03:36:52,658 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:52,660 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,660 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:52,660 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741849_1032 2024-11-22T03:36:52,661 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:52,662 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:52,662 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 
2024-11-22T03:36:52,662 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741850_1033 2024-11-22T03:36:52,663 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:52,664 WARN [IPC Server handler 3 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:52,664 WARN [IPC Server handler 3 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:52,664 WARN [IPC Server handler 3 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:52,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741851_1034 (size=12506) 2024-11-22T03:36:53,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/dd270ab1ebf947f5adde1a48bde48653 2024-11-22T03:36:53,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/dd270ab1ebf947f5adde1a48bde48653 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653 2024-11-22T03:36:53,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653, entries=7, sequenceid=24, filesize=12.2 K 2024-11-22T03:36:53,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for aa337c81b1b655cad878320c92b169ba in 442ms, sequenceid=24, compaction requested=false 2024-11-22T03:36:53,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:53,091 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-22T03:36:53,091 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:53,091 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653 because midkey is the same as first or last row 2024-11-22T03:36:53,859 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:53,931 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:53,932 WARN [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]] 2024-11-22T03:36:53,932 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C39625%2C1732246592490:(num 1732246609902) roll requested 2024-11-22T03:36:53,932 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.1732246613932 2024-11-22T03:36:53,935 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:53,935 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:53,935 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741852_1035 2024-11-22T03:36:53,936 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:53,938 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35503 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:53,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38210 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741853_1036 to mirror 127.0.0.1:35503 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:53,938 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:53,938 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741853_1036 2024-11-22T03:36:53,938 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38210 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:36:53,939 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38210 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38210 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:53,939 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:53,940 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:53,940 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:53,940 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741854_1037 2024-11-22T03:36:53,941 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:53,943 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:53,943 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 
2024-11-22T03:36:53,943 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741855_1038 2024-11-22T03:36:53,944 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:53,945 WARN [IPC Server handler 0 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:53,945 WARN [IPC Server handler 0 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:53,945 WARN [IPC Server handler 0 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:53,948 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:53,948 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:53,948 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:53,948 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:53,948 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:53,948 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246609902 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246613932 2024-11-22T03:36:53,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741841_1024 (size=25992) 2024-11-22T03:36:53,950 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337)] 2024-11-22T03:36:53,950 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:53,950 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246609902 is not closed yet, 
will try archiving it next time 2024-11-22T03:36:53,950 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246605867 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs/b458937b0f5f%2C39625%2C1732246592490.1732246605867 2024-11-22T03:36:54,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] regionserver.HRegion(8855): Flush requested on aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:54,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:36:54,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/e8ce9785dd184a63aab2151ba0be7f89 is 1079, key is tmprow/info:/1732246614076/Put/seqid=0 2024-11-22T03:36:54,088 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38489 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38220 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741857_1040 to mirror 127.0.0.1:38489 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,088 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:54,088 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741857_1040 2024-11-22T03:36:54,088 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38220 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:54,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38220 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38220 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,089 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:54,090 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,090 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:54,090 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741858_1041 2024-11-22T03:36:54,091 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:54,093 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,093 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38226 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741859_1042 to mirror 127.0.0.1:46063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,093 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:54,094 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741859_1042 2024-11-22T03:36:54,094 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38226 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:54,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38226 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38226 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,094 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:54,095 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,096 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:54,096 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741860_1043 2024-11-22T03:36:54,096 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:54,097 WARN [IPC Server handler 1 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:54,097 WARN [IPC Server handler 1 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:54,097 WARN [IPC Server handler 1 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:54,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741861_1044 (size=6027) 2024-11-22T03:36:54,351 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:54,417 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ba52475[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741846_1029 to 127.0.0.1:38489 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) 
~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,418 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@562db71[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741851_1034 to 127.0.0.1:38489 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:54,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/e8ce9785dd184a63aab2151ba0be7f89 2024-11-22T03:36:54,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/e8ce9785dd184a63aab2151ba0be7f89 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/e8ce9785dd184a63aab2151ba0be7f89 2024-11-22T03:36:54,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/e8ce9785dd184a63aab2151ba0be7f89, entries=1, sequenceid=34, filesize=5.9 K 2024-11-22T03:36:54,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for aa337c81b1b655cad878320c92b169ba in 440ms, sequenceid=34, compaction requested=true 2024-11-22T03:36:54,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:54,518 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-22T03:36:54,518 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:54,518 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653 because midkey is the same as first or last row 2024-11-22T03:36:54,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa337c81b1b655cad878320c92b169ba:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:36:54,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:36:54,519 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:36:54,520 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:36:54,520 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HStore(1541): aa337c81b1b655cad878320c92b169ba/info is initiating minor compaction (all files) 2024-11-22T03:36:54,520 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
aa337c81b1b655cad878320c92b169ba/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:36:54,520 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/6bd94e484c2a440bb61c2773d9d663a9, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/e8ce9785dd184a63aab2151ba0be7f89] into tmpdir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp, totalSize=28.2 K 2024-11-22T03:36:54,521 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6bd94e484c2a440bb61c2773d9d663a9, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732246607943 2024-11-22T03:36:54,521 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.Compactor(225): Compacting dd270ab1ebf947f5adde1a48bde48653, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732246612017 2024-11-22T03:36:54,522 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.Compactor(225): Compacting e8ce9785dd184a63aab2151ba0be7f89, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732246614076 2024-11-22T03:36:54,537 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa337c81b1b655cad878320c92b169ba#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:36:54,538 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/8c4a87ffbd084f2d94e6a428f8bf1bfd is 1080, key is row0002/info:/1732246607943/Put/seqid=0 2024-11-22T03:36:54,541 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35503 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:36:54,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38254 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741862_1045 to mirror 127.0.0.1:35503 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,541 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:54,541 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741862_1045 2024-11-22T03:36:54,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38254 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:54,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38254 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38254 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,541 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:54,543 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,543 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:54,543 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741863_1046 2024-11-22T03:36:54,544 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:54,546 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38258 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741864_1047 to mirror 127.0.0.1:46063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,546 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,546 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38258 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:54,546 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:54,546 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741864_1047 2024-11-22T03:36:54,546 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38258 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38258 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:54,547 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:54,550 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38489 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38270 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741865_1048 to mirror 127.0.0.1:38489 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:54,550 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:54,550 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741865_1048 2024-11-22T03:36:54,550 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38270 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:54,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38270 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38270 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
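The Thread-947 records above all follow the same recovery pattern: the client tries to build a two-datanode write pipeline, the connection or ack from one datanode fails, the block is abandoned, the failing datanode is added to an exclusion set, and a fresh pipeline is requested without it. A simplified sketch of that loop, using the datanode addresses from the log but with entirely hypothetical names and a hard-coded reachability check (this is not the hadoop-hdfs-client implementation):

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Hypothetical simulation of the abandon-and-exclude retry loop seen in the
// DataStreamer records above; reachability is hard-coded, not probed.
public class PipelineExclusionSketch {
    public static void main(String[] args) {
        // In this run only 127.0.0.1:33343 still accepts connections; the others refuse them.
        Set<String> reachable = Set.of("127.0.0.1:33343");
        List<String> candidates = List.of(
            "127.0.0.1:33343", "127.0.0.1:35503", "127.0.0.1:40895",
            "127.0.0.1:46063", "127.0.0.1:38489");
        Set<String> excluded = new LinkedHashSet<>();
        int requiredReplicas = 2;

        for (int attempt = 1; attempt <= candidates.size(); attempt++) {
            // Build the next pipeline from candidates that have not been excluded yet.
            List<String> pipeline = new ArrayList<>();
            for (String node : candidates) {
                if (!excluded.contains(node) && pipeline.size() < requiredReplicas) {
                    pipeline.add(node);
                }
            }
            if (pipeline.size() < requiredReplicas) {
                System.out.println("only " + pipeline.size()
                    + " replica(s) left after exclusions: " + pipeline);
                return;
            }
            String badNode = pipeline.stream()
                .filter(node -> !reachable.contains(node))
                .findFirst().orElse(null);
            if (badNode == null) {
                System.out.println("pipeline established: " + pipeline);
                return;
            }
            // Mirrors the paired "Abandoning blk_..." / "Excluding datanode ..." warnings.
            System.out.println("attempt " + attempt + ": datanode " + badNode
                + " is bad, abandoning block and excluding it");
            excluded.add(badNode);
        }
    }
}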
2024-11-22T03:36:54,551 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:54,552 WARN [IPC Server handler 0 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:54,552 WARN [IPC Server handler 0 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:54,552 WARN [IPC Server handler 0 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:54,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741866_1049 (size=17994) 2024-11-22T03:36:54,611 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:54,970 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/8c4a87ffbd084f2d94e6a428f8bf1bfd as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd 2024-11-22T03:36:54,979 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa337c81b1b655cad878320c92b169ba/info of aa337c81b1b655cad878320c92b169ba into 8c4a87ffbd084f2d94e6a428f8bf1bfd(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
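The compaction above leaves a single 17.6 K store file, and the split-policy records that follow check that size against a 16.0 K threshold before rejecting the split because the file's midkey coincides with its first or last row. A minimal sketch of that two-step decision, with hypothetical names and placeholder row keys (only the sizes are taken from the log):

// Hypothetical sketch of the size check plus midkey guard reflected in the
// ConstantSizeRegionSplitPolicy / StoreUtils records below; not HBase source.
public class SplitDecisionSketch {
    // Size check: the region is considered big enough once the summed store size
    // exceeds the size to check against.
    static boolean bigEnoughToSplit(double sumSizeKb, double sizeToCheckKb) {
        return sumSizeKb > sizeToCheckKb;
    }

    // Midkey guard: a split point equal to the first or last row would leave one
    // daughter region empty, so such a split point is rejected.
    static boolean usableSplitPoint(String midkey, String firstRow, String lastRow) {
        return !midkey.equals(firstRow) && !midkey.equals(lastRow);
    }

    public static void main(String[] args) {
        double sumSizeKb = 17.6, sizeToCheckKb = 16.0;               // values from the log
        String firstRow = "rowA", lastRow = "rowZ", midkey = "rowA"; // placeholder keys

        System.out.println("big enough to split: " + bigEnoughToSplit(sumSizeKb, sizeToCheckKb)); // true
        System.out.println("usable split point:  " + usableSplitPoint(midkey, firstRow, lastRow)); // false
    }
}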
2024-11-22T03:36:54,979 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:54,980 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba., storeName=aa337c81b1b655cad878320c92b169ba/info, priority=13, startTime=1732246614518; duration=0sec 2024-11-22T03:36:54,980 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T03:36:54,980 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:54,980 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd because midkey is the same as first or last row 2024-11-22T03:36:54,980 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T03:36:54,980 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:54,980 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd because midkey is the same as first or last row 2024-11-22T03:36:54,981 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T03:36:54,981 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:54,981 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd because midkey is the same as first or last row 2024-11-22T03:36:54,981 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:36:54,981 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa337c81b1b655cad878320c92b169ba:info 2024-11-22T03:36:55,418 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@562db71[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer 
BP-300744400-172.17.0.3-1732246589693:blk_1073741841_1024 to 127.0.0.1:40895 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:55,418 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ba52475[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741861_1044 to 127.0.0.1:46063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:55,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] regionserver.HRegion(8855): Flush requested on aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:55,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:36:55,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/b4b52727da934797bef10e0ede2485c1 is 1079, key is tmprow/info:/1732246615500/Put/seqid=0 2024-11-22T03:36:55,510 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,510 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:55,510 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741867_1050 2024-11-22T03:36:55,511 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:55,512 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,512 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:55,512 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741868_1051 2024-11-22T03:36:55,513 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:55,514 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,514 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:55,514 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741869_1052 2024-11-22T03:36:55,515 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:55,518 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35503 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,518 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38292 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741870_1053 to mirror 127.0.0.1:35503 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:55,518 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:55,518 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741870_1053 2024-11-22T03:36:55,518 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38292 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:55,519 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38292 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38292 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:55,519 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:55,520 WARN [IPC Server handler 1 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:55,520 WARN [IPC Server handler 1 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:55,520 WARN [IPC Server handler 1 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:55,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741871_1054 (size=6027) 2024-11-22T03:36:55,859 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
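The IPC-server warnings above reduce to simple replica accounting: the block needs 2 replicas, but with every datanode except 127.0.0.1:33343 excluded there is no second DISK storage left to select. A small sketch of that shortfall calculation, using hypothetical names and the counts from the log (this is not the NameNode placement code):

// Hypothetical sketch of the replica shortfall behind the
// "still in need of 1 to reach 2" placement warnings above.
public class ReplicaPlacementSketch {
    public static void main(String[] args) {
        int requiredReplicas = 2;   // replication=2 in the warning
        int reachableDiskNodes = 1; // only 127.0.0.1:33343 still accepts block writes
        int placed = Math.min(requiredReplicas, reachableDiskNodes);
        int stillNeeded = requiredReplicas - placed;
        if (stillNeeded > 0) {
            System.out.println("Failed to place enough replicas, still in need of "
                + stillNeeded + " to reach " + requiredReplicas);
        }
    }
}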
2024-11-22T03:36:55,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/b4b52727da934797bef10e0ede2485c1 2024-11-22T03:36:55,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/b4b52727da934797bef10e0ede2485c1 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/b4b52727da934797bef10e0ede2485c1 2024-11-22T03:36:55,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/b4b52727da934797bef10e0ede2485c1, entries=1, sequenceid=45, filesize=5.9 K 2024-11-22T03:36:55,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for aa337c81b1b655cad878320c92b169ba in 441ms, sequenceid=45, compaction requested=false 2024-11-22T03:36:55,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:55,944 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-22T03:36:55,944 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:55,944 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd because midkey is the same as first or last row 2024-11-22T03:36:55,950 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,951 WARN [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]] 2024-11-22T03:36:55,951 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C39625%2C1732246592490:(num 1732246613932) roll requested 2024-11-22T03:36:55,951 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.1732246615951 2024-11-22T03:36:55,954 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,954 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:55,955 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741872_1055 2024-11-22T03:36:55,955 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:55,957 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38489 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
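The roll requested just above is driven by a replica-count comparison: the WAL's write pipeline has shrunk to a single datanode while at least two replicas are expected, so the roller asks for the current WAL to be closed and a new one opened. A minimal sketch of that check, with hypothetical names and the counts taken from the log (not the FSHLog implementation):

// Hypothetical sketch of the low-replication check behind
// "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL."
public class WalLowReplicationCheckSketch {
    static boolean shouldRequestRoll(int currentPipelineReplicas, int minReplicas) {
        return currentPipelineReplicas < minReplicas;
    }

    public static void main(String[] args) {
        int currentPipelineReplicas = 1; // only DatanodeInfoWithStorage[127.0.0.1:33343,...] remains
        int minReplicas = 2;             // the expected lower bound in the warning
        if (shouldRequestRoll(currentPipelineReplicas, minReplicas)) {
            System.out.println("Found " + currentPipelineReplicas
                + " replicas but expecting no less than " + minReplicas
                + " replicas. Requesting close of WAL.");
        }
    }
}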
2024-11-22T03:36:55,957 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38322 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741873_1056 to mirror 127.0.0.1:38489 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:55,958 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:55,958 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741873_1056 2024-11-22T03:36:55,958 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38322 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:36:55,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38322 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38322 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:55,958 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:55,960 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,960 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:55,960 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741874_1057 2024-11-22T03:36:55,961 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:55,962 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:55,962 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:55,962 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741875_1058 2024-11-22T03:36:55,963 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:55,964 WARN [IPC Server handler 4 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:55,964 WARN [IPC Server handler 4 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:55,964 WARN [IPC Server handler 4 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:55,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:55,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:55,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:55,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:55,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:36:55,967 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246613932 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246615951 2024-11-22T03:36:55,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741856_1039 (size=13591) 2024-11-22T03:36:55,972 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337)] 2024-11-22T03:36:55,972 DEBUG 
[regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:55,972 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246613932 is not closed yet, will try archiving it next time 2024-11-22T03:36:55,972 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246609902 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs/b458937b0f5f%2C39625%2C1732246592490.1732246609902 2024-11-22T03:36:56,369 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 is not closed yet, will try archiving it next time 2024-11-22T03:36:56,612 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:56,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] regionserver.HRegion(8855): Flush requested on aa337c81b1b655cad878320c92b169ba 2024-11-22T03:36:56,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:36:56,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/9abdf24b81fa4332bd163d078c2c5274 is 1079, key is tmprow/info:/1732246616921/Put/seqid=0 2024-11-22T03:36:56,930 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:56,930 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:36:56,931 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741877_1060 2024-11-22T03:36:56,931 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:56,933 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:56,933 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:56,933 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741878_1061 2024-11-22T03:36:56,933 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:56,935 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:56,935 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:56,935 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741879_1062 2024-11-22T03:36:56,935 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:56,938 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35503 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:56,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38338 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741880_1063 to mirror 127.0.0.1:35503 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:56,938 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:56,938 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741880_1063 2024-11-22T03:36:56,938 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38338 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:36:56,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:38338 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38338 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:56,939 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:56,940 WARN [IPC Server handler 4 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:56,940 WARN [IPC Server handler 4 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:56,940 WARN [IPC Server handler 4 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:56,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741881_1064 (size=6027) 2024-11-22T03:36:57,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/9abdf24b81fa4332bd163d078c2c5274 2024-11-22T03:36:57,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/9abdf24b81fa4332bd163d078c2c5274 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/9abdf24b81fa4332bd163d078c2c5274 2024-11-22T03:36:57,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/9abdf24b81fa4332bd163d078c2c5274, entries=1, sequenceid=55, filesize=5.9 K 2024-11-22T03:36:57,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for aa337c81b1b655cad878320c92b169ba in 439ms, sequenceid=55, compaction requested=true 2024-11-22T03:36:57,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:57,361 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-22T03:36:57,361 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:57,361 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd because midkey is the same as first or last row 2024-11-22T03:36:57,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa337c81b1b655cad878320c92b169ba:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:36:57,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:36:57,362 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:36:57,363 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:36:57,363 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HStore(1541): aa337c81b1b655cad878320c92b169ba/info is initiating minor compaction (all files) 2024-11-22T03:36:57,363 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aa337c81b1b655cad878320c92b169ba/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 
2024-11-22T03:36:57,364 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/b4b52727da934797bef10e0ede2485c1, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/9abdf24b81fa4332bd163d078c2c5274] into tmpdir=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp, totalSize=29.3 K 2024-11-22T03:36:57,364 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c4a87ffbd084f2d94e6a428f8bf1bfd, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732246607943 2024-11-22T03:36:57,365 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.Compactor(225): Compacting b4b52727da934797bef10e0ede2485c1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732246615500 2024-11-22T03:36:57,365 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9abdf24b81fa4332bd163d078c2c5274, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732246616921 2024-11-22T03:36:57,383 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa337c81b1b655cad878320c92b169ba#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:36:57,383 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/68b395e5a63b49c99ccaadd92453bfc4 is 1080, key is row0002/info:/1732246607943/Put/seqid=0 2024-11-22T03:36:57,386 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:36:57,386 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:36:57,386 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741882_1065 2024-11-22T03:36:57,387 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:36:57,388 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:57,388 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK]) is bad. 2024-11-22T03:36:57,388 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741883_1066 2024-11-22T03:36:57,389 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35503,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK] 2024-11-22T03:36:57,390 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:57,390 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]) is bad. 2024-11-22T03:36:57,391 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741884_1067 2024-11-22T03:36:57,391 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK] 2024-11-22T03:36:57,393 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:57,393 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 
2024-11-22T03:36:57,393 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741885_1068 2024-11-22T03:36:57,394 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:36:57,395 WARN [IPC Server handler 4 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:36:57,395 WARN [IPC Server handler 4 on default port 33231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:36:57,395 WARN [IPC Server handler 4 on default port 33231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:36:57,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741886_1069 (size=18097) 2024-11-22T03:36:57,418 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ba52475[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741866_1049 to 127.0.0.1:40895 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:36:57,418 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@562db71[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741871_1054 to 127.0.0.1:38489 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:57,810 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/68b395e5a63b49c99ccaadd92453bfc4 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/68b395e5a63b49c99ccaadd92453bfc4 2024-11-22T03:36:57,820 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa337c81b1b655cad878320c92b169ba/info of aa337c81b1b655cad878320c92b169ba into 68b395e5a63b49c99ccaadd92453bfc4(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:36:57,820 INFO [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba., storeName=aa337c81b1b655cad878320c92b169ba/info, priority=13, startTime=1732246617361; duration=0sec 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/68b395e5a63b49c99ccaadd92453bfc4 because midkey is the same as first or last row 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/68b395e5a63b49c99ccaadd92453bfc4 because midkey is the same as first or last row 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:36:57,820 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/68b395e5a63b49c99ccaadd92453bfc4 because midkey is the same as first or last row 2024-11-22T03:36:57,821 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:36:57,821 DEBUG [RS:0;b458937b0f5f:39625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa337c81b1b655cad878320c92b169ba:info 2024-11-22T03:36:57,860 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:57,973 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:57,973 WARN [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-22T03:36:58,148 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:36:58,153 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:36:58,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:36:58,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:36:58,154 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:36:58,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a4db74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:36:58,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4091ef21{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:36:58,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f60aba7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/java.io.tmpdir/jetty-localhost-40925-hadoop-hdfs-3_4_1-tests_jar-_-any-9245479214995422141/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:36:58,256 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bc2fb05{HTTP/1.1, (http/1.1)}{localhost:40925} 2024-11-22T03:36:58,256 INFO [Time-limited test {}] server.Server(415): Started @137674ms 2024-11-22T03:36:58,257 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:36:58,419 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ba52475[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741881_1064 to 127.0.0.1:46063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:58,419 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@562db71[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741856_1039 to 127.0.0.1:38489 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:36:58,612 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:58,721 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:36:58,733 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4245f35fca21a7 with lease ID 0x4da318c3fd82aa9d: from storage DS-72b5788c-7e29-42d6-878e-2bd50f6714bb node DatanodeRegistration(127.0.0.1:41553, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=44299, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:36:58,734 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4245f35fca21a7 with lease ID 0x4da318c3fd82aa9d: from storage DS-134a0a23-f9c0-4c88-b7ea-2ac2f7297d0d node DatanodeRegistration(127.0.0.1:41553, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=44299, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:36:59,860 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:36:59,973 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:00,419 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@562db71[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33343, datanodeUuid=d7569e07-f53e-44f6-ac1a-4b4f451d2c43, infoPort=35337, infoSecurePort=0, ipcPort=41759, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741886_1069 to 127.0.0.1:38489 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:00,612 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:01,861 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:01,974 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:02,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:37:02,613 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:02,853 ERROR [FSHLog-0-hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData-prefix:b458937b0f5f,42831,1732246592320 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:02,853 WARN [FSHLog-0-hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData-prefix:b458937b0f5f,42831,1732246592320 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:02,853 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C42831%2C1732246592320:(num 1732246592632) roll requested 2024-11-22T03:37:02,854 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C42831%2C1732246592320.1732246622854 2024-11-22T03:37:02,858 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:02,859 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK]) is bad. 2024-11-22T03:37:02,859 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741887_1070 2024-11-22T03:37:02,860 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38489,DS-ec58b9f1-b43e-4d0b-a5da-a22125366a20,DISK] 2024-11-22T03:37:02,863 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40895 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:02,863 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_952184402_22 at /127.0.0.1:42630 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data4]'}, localName='127.0.0.1:41553', datanodeUuid='c941676d-a41a-4732-8e59-96d141820b93', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741888_1071 to mirror 127.0.0.1:40895 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:02,864 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41553,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:37:02,864 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741888_1071 2024-11-22T03:37:02,864 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_952184402_22 at /127.0.0.1:42630 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:37:02,864 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_952184402_22 at /127.0.0.1:42630 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:41553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42630 dst: /127.0.0.1:41553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:02,865 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:37:02,875 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:02,875 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:02,875 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:02,875 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:02,875 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:02,876 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246622854 2024-11-22T03:37:02,876 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:02,876 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:02,876 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 2024-11-22T03:37:02,876 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44299:44299),(127.0.0.1/127.0.0.1:35337:35337)] 2024-11-22T03:37:02,876 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 is not closed yet, will try archiving it next time 2024-11-22T03:37:02,877 WARN [IPC Server handler 2 on default port 33231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-22T03:37:02,877 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 after 1ms 2024-11-22T03:37:03,861 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:03,974 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:05,861 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:05,974 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:06,879 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 after 4003ms 2024-11-22T03:37:07,862 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:07,975 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:08,751 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3369083d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-300744400-172.17.0.3-1732246589693:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:46063,null,null]) java.net.ConnectException: Call From b458937b0f5f/172.17.0.3 to localhost:35355 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T03:37:08,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741833_1019 (size=455) 2024-11-22T03:37:08,916 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246593123 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs/b458937b0f5f%2C39625%2C1732246592490.1732246593123 2024-11-22T03:37:08,917 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246613932 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs/b458937b0f5f%2C39625%2C1732246592490.1732246613932 2024-11-22T03:37:09,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741833_1019 (size=455) 2024-11-22T03:37:09,862 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:09,975 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:11,799 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.1732246631798 2024-11-22T03:37:11,809 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:11,810 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:11,810 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:11,810 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:11,810 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:11,810 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246615951 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246631798 2024-11-22T03:37:11,811 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:44299:44299)] 2024-11-22T03:37:11,811 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246615951 is not closed yet, will try archiving it next time 2024-11-22T03:37:11,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741876_1059 (size=12911) 2024-11-22T03:37:11,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] regionserver.HRegion(8855): Flush requested on aa337c81b1b655cad878320c92b169ba 2024-11-22T03:37:11,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:37:11,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/1c69bb4a677c49b58e75b20edb91f04e is 1080, key is row0013/info:/1732246631812/Put/seqid=0 2024-11-22T03:37:11,824 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40895 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:11,824 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:42242 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741891_1075 to mirror 127.0.0.1:40895 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:11,825 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:37:11,825 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741891_1075 2024-11-22T03:37:11,825 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:42242 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:37:11,825 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:42242 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42242 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:11,825 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:37:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741892_1076 (size=8190) 2024-11-22T03:37:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741892_1076 (size=8190) 2024-11-22T03:37:11,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/1c69bb4a677c49b58e75b20edb91f04e 2024-11-22T03:37:11,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/1c69bb4a677c49b58e75b20edb91f04e as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/1c69bb4a677c49b58e75b20edb91f04e 2024-11-22T03:37:11,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/1c69bb4a677c49b58e75b20edb91f04e, entries=3, sequenceid=66, filesize=8.0 K 2024-11-22T03:37:11,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for aa337c81b1b655cad878320c92b169ba in 36ms, sequenceid=66, compaction requested=false 2024-11-22T03:37:11,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa337c81b1b655cad878320c92b169ba: 2024-11-22T03:37:11,853 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-22T03:37:11,853 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:37:11,853 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/68b395e5a63b49c99ccaadd92453bfc4 because midkey is the same as first or last row 2024-11-22T03:37:11,863 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:11,976 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:11,976 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-22T03:37:12,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:37:12,041 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:37:12,042 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:12,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:12,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:12,042 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T03:37:12,043 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:37:12,043 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2077964693, stopped=false 2024-11-22T03:37:12,043 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,42831,1732246592320 2024-11-22T03:37:12,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:12,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:12,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:12,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:12,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:12,111 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:37:12,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:12,112 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:37:12,112 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:12,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:12,112 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:12,112 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,39625,1732246592490' ***** 2024-11-22T03:37:12,113 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:37:12,113 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,43975,1732246593751' ***** 2024-11-22T03:37:12,113 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:37:12,113 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:37:12,113 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:12,113 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:37:12,113 INFO [RS:1;b458937b0f5f:43975 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:37:12,113 INFO [RS:1;b458937b0f5f:43975 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:37:12,113 INFO [RS:0;b458937b0f5f:39625 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:37:12,113 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:37:12,114 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,43975,1732246593751 2024-11-22T03:37:12,114 INFO [RS:0;b458937b0f5f:39625 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:37:12,114 INFO [RS:1;b458937b0f5f:43975 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:37:12,114 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(3091): Received CLOSE for aa337c81b1b655cad878320c92b169ba 2024-11-22T03:37:12,114 INFO [RS:1;b458937b0f5f:43975 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b458937b0f5f:43975. 
2024-11-22T03:37:12,114 DEBUG [RS:1;b458937b0f5f:43975 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:12,114 DEBUG [RS:1;b458937b0f5f:43975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:12,114 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,43975,1732246593751; all regions closed. 2024-11-22T03:37:12,114 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:12,115 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,39625,1732246592490 2024-11-22T03:37:12,115 INFO [RS:0;b458937b0f5f:39625 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:37:12,115 INFO [RS:0;b458937b0f5f:39625 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:39625. 
2024-11-22T03:37:12,115 DEBUG [RS:0;b458937b0f5f:39625 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:12,115 DEBUG [RS:0;b458937b0f5f:39625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:12,115 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,115 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,115 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:37:12,115 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,115 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:37:12,115 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:37:12,115 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,115 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing aa337c81b1b655cad878320c92b169ba, disabling compactions & flushes 2024-11-22T03:37:12,116 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:37:12,116 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:37:12,116 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 
after waiting 0 ms 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:37:12,116 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:37:12,116 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, aa337c81b1b655cad878320c92b169ba=TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.} 2024-11-22T03:37:12,116 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:37:12,116 DEBUG [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, aa337c81b1b655cad878320c92b169ba 2024-11-22T03:37:12,116 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:37:12,116 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing aa337c81b1b655cad878320c92b169ba 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:37:12,116 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:37:12,116 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-22T03:37:12,117 ERROR [FSHLog-0-hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce-prefix:b458937b0f5f,39625,1732246592490.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:12,117 WARN [FSHLog-0-hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce-prefix:b458937b0f5f,39625,1732246592490.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,117 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C39625%2C1732246592490.meta:.meta(num 1732246593580) roll requested 2024-11-22T03:37:12,117 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C39625%2C1732246592490.meta.1732246632117.meta 2024-11-22T03:37:12,120 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,120 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:12,121 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 2024-11-22T03:37:12,121 WARN [IPC Server handler 3 on default port 33231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1013 2024-11-22T03:37:12,121 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 after 0ms 2024-11-22T03:37:12,127 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/dd3789b552fb41bdb24773682afa92fd is 1080, key is row0015/info:/1732246631817/Put/seqid=0 2024-11-22T03:37:12,128 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,128 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,128 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,129 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246632117.meta 2024-11-22T03:37:12,129 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,129 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46063,DS-798cc6a3-858c-4921-ace9-e9c509aa79d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,129 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta 2024-11-22T03:37:12,129 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:44299:44299)] 2024-11-22T03:37:12,129 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta is not closed yet, will try archiving it next time 2024-11-22T03:37:12,130 WARN [IPC Server handler 2 on default port 33231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741834_1010 2024-11-22T03:37:12,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:32768 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741894_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6]'}, localName='127.0.0.1:33343', datanodeUuid='d7569e07-f53e-44f6-ac1a-4b4f451d2c43', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741894_1079 to mirror 127.0.0.1:40895 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:12,130 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40895 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,130 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:32768 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741894_1079] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:37:12,130 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:37:12,130 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741894_1079 2024-11-22T03:37:12,130 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:32768 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741894_1079] {}] datanode.DataXceiver(331): 127.0.0.1:33343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32768 dst: /127.0.0.1:33343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:12,130 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta after 1ms 2024-11-22T03:37:12,130 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:37:12,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741895_1081 (size=14660) 2024-11-22T03:37:12,136 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/dd3789b552fb41bdb24773682afa92fd 2024-11-22T03:37:12,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741895_1081 (size=14660) 2024-11-22T03:37:12,144 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/.tmp/info/dd3789b552fb41bdb24773682afa92fd as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd3789b552fb41bdb24773682afa92fd 2024-11-22T03:37:12,148 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/info/e0084f32d37c4523ba58b43c7b966379 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba./info:regioninfo/1732246594254/Put/seqid=0 2024-11-22T03:37:12,149 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:12,150 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741896_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-f73ca267-7c11-49d8-b21d-fb4edb69881d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:37:12,150 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741896_1082 2024-11-22T03:37:12,150 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:37:12,151 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd3789b552fb41bdb24773682afa92fd, entries=9, sequenceid=78, filesize=14.3 K 2024-11-22T03:37:12,152 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for aa337c81b1b655cad878320c92b169ba in 36ms, sequenceid=78, compaction requested=true 2024-11-22T03:37:12,159 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/6bd94e484c2a440bb61c2773d9d663a9, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/e8ce9785dd184a63aab2151ba0be7f89, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/b4b52727da934797bef10e0ede2485c1, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/9abdf24b81fa4332bd163d078c2c5274] to archive 2024-11-22T03:37:12,160 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T03:37:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741897_1083 (size=7089) 2024-11-22T03:37:12,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741897_1083 (size=7089) 2024-11-22T03:37:12,165 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/info/e0084f32d37c4523ba58b43c7b966379 2024-11-22T03:37:12,167 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/6bd94e484c2a440bb61c2773d9d663a9 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/6bd94e484c2a440bb61c2773d9d663a9 2024-11-22T03:37:12,168 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/dd270ab1ebf947f5adde1a48bde48653 2024-11-22T03:37:12,171 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/8c4a87ffbd084f2d94e6a428f8bf1bfd 2024-11-22T03:37:12,172 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/e8ce9785dd184a63aab2151ba0be7f89 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/e8ce9785dd184a63aab2151ba0be7f89 2024-11-22T03:37:12,174 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/b4b52727da934797bef10e0ede2485c1 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/b4b52727da934797bef10e0ede2485c1 2024-11-22T03:37:12,176 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/9abdf24b81fa4332bd163d078c2c5274 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/info/9abdf24b81fa4332bd163d078c2c5274 2024-11-22T03:37:12,176 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b458937b0f5f:42831 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-22T03:37:12,177 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6bd94e484c2a440bb61c2773d9d663a9=10347, dd270ab1ebf947f5adde1a48bde48653=12506, 8c4a87ffbd084f2d94e6a428f8bf1bfd=17994, e8ce9785dd184a63aab2151ba0be7f89=6027, b4b52727da934797bef10e0ede2485c1=6027, 9abdf24b81fa4332bd163d078c2c5274=6027] 2024-11-22T03:37:12,182 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/aa337c81b1b655cad878320c92b169ba/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-22T03:37:12,182 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 2024-11-22T03:37:12,183 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for aa337c81b1b655cad878320c92b169ba: Waiting for close lock at 1732246632115Running coprocessor pre-close hooks at 1732246632115Disabling compacts and flushes for region at 1732246632115Disabling writes for close at 1732246632116 (+1 ms)Obtaining lock to block concurrent updates at 1732246632116Preparing flush snapshotting stores in aa337c81b1b655cad878320c92b169ba at 1732246632116Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732246632116Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. at 1732246632117 (+1 ms)Flushing aa337c81b1b655cad878320c92b169ba/info: creating writer at 1732246632117Flushing aa337c81b1b655cad878320c92b169ba/info: appending metadata at 1732246632126 (+9 ms)Flushing aa337c81b1b655cad878320c92b169ba/info: closing flushed file at 1732246632126Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a282583: reopening flushed file at 1732246632143 (+17 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for aa337c81b1b655cad878320c92b169ba in 36ms, sequenceid=78, compaction requested=true at 1732246632152 (+9 ms)Writing region close event to WAL at 1732246632177 (+25 ms)Running coprocessor post-close hooks at 1732246632182 (+5 ms)Closed at 1732246632182 2024-11-22T03:37:12,183 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732246593882.aa337c81b1b655cad878320c92b169ba. 
2024-11-22T03:37:12,190 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/ns/ebd0b8fbaff94cb0b8eee780ce5453f5 is 43, key is default/ns:d/1732246593673/Put/seqid=0 2024-11-22T03:37:12,193 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40895 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:33510 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741898_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data4]'}, localName='127.0.0.1:41553', datanodeUuid='c941676d-a41a-4732-8e59-96d141820b93', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741898_1084 to mirror 127.0.0.1:40895 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:12,193 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41553,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:37:12,193 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741898_1084 2024-11-22T03:37:12,193 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:33510 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741898_1084] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:37:12,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:33510 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741898_1084] {}] datanode.DataXceiver(331): 127.0.0.1:41553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33510 dst: /127.0.0.1:41553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:12,194 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:37:12,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741899_1085 (size=5153) 2024-11-22T03:37:12,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741899_1085 (size=5153) 2024-11-22T03:37:12,204 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/ns/ebd0b8fbaff94cb0b8eee780ce5453f5 2024-11-22T03:37:12,212 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.1732246615951 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs/b458937b0f5f%2C39625%2C1732246592490.1732246615951 2024-11-22T03:37:12,226 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/table/9bbe2ba81acf42378604b12ffd009e59 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732246594265/Put/seqid=0 2024-11-22T03:37:12,228 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40895 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:12,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:33532 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data4]'}, localName='127.0.0.1:41553', datanodeUuid='c941676d-a41a-4732-8e59-96d141820b93', xmitsInProgress=0}:Exception transferring block BP-300744400-172.17.0.3-1732246589693:blk_1073741900_1086 to mirror 127.0.0.1:40895 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:12,229 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-300744400-172.17.0.3-1732246589693:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41553,DS-72b5788c-7e29-42d6-878e-2bd50f6714bb,DISK], DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK]) is bad. 2024-11-22T03:37:12,229 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-300744400-172.17.0.3-1732246589693:blk_1073741900_1086 2024-11-22T03:37:12,229 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:33532 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:37:12,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2103057052_22 at /127.0.0.1:33532 [Receiving block BP-300744400-172.17.0.3-1732246589693:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:41553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33532 dst: /127.0.0.1:41553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:12,229 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40895,DS-8b6f1ed3-f30c-45af-9e6e-136252990ea9,DISK] 2024-11-22T03:37:12,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741901_1087 (size=5424) 2024-11-22T03:37:12,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741901_1087 (size=5424) 2024-11-22T03:37:12,235 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/table/9bbe2ba81acf42378604b12ffd009e59 2024-11-22T03:37:12,242 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/info/e0084f32d37c4523ba58b43c7b966379 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/info/e0084f32d37c4523ba58b43c7b966379 2024-11-22T03:37:12,248 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/info/e0084f32d37c4523ba58b43c7b966379, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T03:37:12,249 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/ns/ebd0b8fbaff94cb0b8eee780ce5453f5 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/ns/ebd0b8fbaff94cb0b8eee780ce5453f5 2024-11-22T03:37:12,256 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/ns/ebd0b8fbaff94cb0b8eee780ce5453f5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:37:12,258 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/.tmp/table/9bbe2ba81acf42378604b12ffd009e59 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/table/9bbe2ba81acf42378604b12ffd009e59 2024-11-22T03:37:12,265 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/table/9bbe2ba81acf42378604b12ffd009e59, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T03:37:12,266 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 150ms, sequenceid=11, 
compaction requested=false 2024-11-22T03:37:12,271 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:37:12,272 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:37:12,272 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:37:12,272 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246632116Running coprocessor pre-close hooks at 1732246632116Disabling compacts and flushes for region at 1732246632116Disabling writes for close at 1732246632116Obtaining lock to block concurrent updates at 1732246632116Preparing flush snapshotting stores in 1588230740 at 1732246632116Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732246632117 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732246632130 (+13 ms)Flushing 1588230740/info: creating writer at 1732246632130Flushing 1588230740/info: appending metadata at 1732246632147 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732246632147Flushing 1588230740/ns: creating writer at 1732246632173 (+26 ms)Flushing 1588230740/ns: appending metadata at 1732246632190 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732246632190Flushing 1588230740/table: creating writer at 1732246632210 (+20 ms)Flushing 1588230740/table: appending metadata at 1732246632225 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732246632225Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3307a16a: reopening flushed file at 1732246632241 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@297d84ca: reopening flushed file at 1732246632248 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14268cc: reopening flushed file at 1732246632257 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 150ms, sequenceid=11, compaction requested=false at 1732246632266 (+9 ms)Writing region close event to WAL at 1732246632267 (+1 ms)Running coprocessor post-close hooks at 1732246632272 (+5 ms)Closed at 1732246632272 2024-11-22T03:37:12,273 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:37:12,316 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,39625,1732246592490; all regions closed. 
2024-11-22T03:37:12,317 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,317 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,317 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:12,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741893_1078 (size=825) 2024-11-22T03:37:12,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741893_1078 (size=825) 2024-11-22T03:37:12,852 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:37:12,852 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:37:12,989 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:37:13,058 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:37:13,058 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:37:13,725 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@28152aad[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41553, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=44299, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741835_1011 to 127.0.0.1:40895 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:13,725 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@31a99994[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41553, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=44299, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741831_1007 to 127.0.0.1:40895 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:13,740 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T03:37:13,740 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T03:37:13,854 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:37:14,727 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@31a99994[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41553, datanodeUuid=c941676d-a41a-4732-8e59-96d141820b93, infoPort=44299, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=1595823909;c=1732246589693):Failed to transfer BP-300744400-172.17.0.3-1732246589693:blk_1073741829_1005 to 127.0.0.1:40895 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:14,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:37:15,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741876_1059 (size=12911) 2024-11-22T03:37:15,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:37:15,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:37:16,122 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 after 4001ms 2024-11-22T03:37:16,131 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta after 4002ms 2024-11-22T03:37:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:37:17,120 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T03:37:17,123 DEBUG [RS:1;b458937b0f5f:43975 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs 2024-11-22T03:37:17,123 INFO [RS:1;b458937b0f5f:43975 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C43975%2C1732246593751:(num 1732246593983) 2024-11-22T03:37:17,123 DEBUG [RS:1;b458937b0f5f:43975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:17,123 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:37:17,124 INFO [RS:1;b458937b0f5f:43975 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:37:17,124 INFO [RS:1;b458937b0f5f:43975 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:37:17,124 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:37:17,124 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:37:17,124 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:37:17,124 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T03:37:17,124 INFO [RS:1;b458937b0f5f:43975 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:37:17,125 INFO [RS:1;b458937b0f5f:43975 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43975 2024-11-22T03:37:17,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,43975,1732246593751 2024-11-22T03:37:17,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:37:17,194 INFO [RS:1;b458937b0f5f:43975 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:37:17,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,207 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,43975,1732246593751] 2024-11-22T03:37:17,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,217 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,43975,1732246593751 already deleted, retry=false 2024-11-22T03:37:17,217 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,43975,1732246593751 expired; onlineServers=1 2024-11-22T03:37:17,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:17,307 INFO [RS:1;b458937b0f5f:43975 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:37:17,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43975-0x101609d56d70002, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:17,307 INFO [RS:1;b458937b0f5f:43975 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,43975,1732246593751; zookeeper connection closed. 2024-11-22T03:37:17,308 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@334d1f0a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@334d1f0a 2024-11-22T03:37:17,318 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T03:37:17,322 DEBUG [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs 2024-11-22T03:37:17,322 INFO [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C39625%2C1732246592490.meta:.meta(num 1732246632117) 2024-11-22T03:37:17,322 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,322 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,322 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,322 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741890_1074 (size=14682) 2024-11-22T03:37:17,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741890_1074 (size=14682) 2024-11-22T03:37:17,328 DEBUG [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs 2024-11-22T03:37:17,328 INFO [RS:0;b458937b0f5f:39625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C39625%2C1732246592490:(num 1732246631798) 2024-11-22T03:37:17,328 DEBUG [RS:0;b458937b0f5f:39625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:17,328 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:37:17,328 INFO [RS:0;b458937b0f5f:39625 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:37:17,328 INFO [RS:0;b458937b0f5f:39625 {}] hbase.ChoreService(370): Chore service for: 
regionserver/b458937b0f5f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:37:17,329 INFO [RS:0;b458937b0f5f:39625 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:37:17,329 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:37:17,329 INFO [RS:0;b458937b0f5f:39625 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39625 2024-11-22T03:37:17,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:37:17,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,39625,1732246592490 2024-11-22T03:37:17,342 INFO [RS:0;b458937b0f5f:39625 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:37:17,342 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,39625,1732246592490] 2024-11-22T03:37:17,365 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,39625,1732246592490 already deleted, retry=false 2024-11-22T03:37:17,365 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,39625,1732246592490 expired; onlineServers=0 2024-11-22T03:37:17,365 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,42831,1732246592320' ***** 2024-11-22T03:37:17,365 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:37:17,365 INFO [M:0;b458937b0f5f:42831 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:37:17,365 INFO [M:0;b458937b0f5f:42831 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:37:17,365 DEBUG [M:0;b458937b0f5f:42831 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:37:17,366 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:37:17,366 DEBUG [M:0;b458937b0f5f:42831 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:37:17,366 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246592857 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246592857,5,FailOnTimeoutGroup] 2024-11-22T03:37:17,366 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246592856 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246592856,5,FailOnTimeoutGroup] 2024-11-22T03:37:17,366 INFO [M:0;b458937b0f5f:42831 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:37:17,366 INFO [M:0;b458937b0f5f:42831 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:37:17,366 DEBUG [M:0;b458937b0f5f:42831 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:37:17,366 INFO [M:0;b458937b0f5f:42831 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:37:17,366 INFO [M:0;b458937b0f5f:42831 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:37:17,367 INFO [M:0;b458937b0f5f:42831 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:37:17,367 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:37:17,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:37:17,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:17,375 DEBUG [M:0;b458937b0f5f:42831 {}] zookeeper.ZKUtil(347): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:37:17,375 WARN [M:0;b458937b0f5f:42831 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:37:17,376 INFO [M:0;b458937b0f5f:42831 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/.lastflushedseqids 2024-11-22T03:37:17,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741902_1088 (size=130) 2024-11-22T03:37:17,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741902_1088 (size=130) 2024-11-22T03:37:17,384 INFO [M:0;b458937b0f5f:42831 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:37:17,384 INFO [M:0;b458937b0f5f:42831 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:37:17,384 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:37:17,384 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:17,384 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:17,384 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:37:17,384 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:17,385 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-22T03:37:17,404 DEBUG [M:0;b458937b0f5f:42831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10972e3cb76a496d84a4254da3add748 is 82, key is hbase:meta,,1/info:regioninfo/1732246593615/Put/seqid=0 2024-11-22T03:37:17,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741903_1089 (size=5672) 2024-11-22T03:37:17,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741903_1089 (size=5672) 2024-11-22T03:37:17,410 INFO [M:0;b458937b0f5f:42831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10972e3cb76a496d84a4254da3add748 2024-11-22T03:37:17,432 DEBUG [M:0;b458937b0f5f:42831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8e13e9c40f4b47139237772be87b2218 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732246594272/Put/seqid=0 2024-11-22T03:37:17,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741904_1090 (size=6254) 2024-11-22T03:37:17,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741904_1090 (size=6254) 2024-11-22T03:37:17,438 INFO [M:0;b458937b0f5f:42831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8e13e9c40f4b47139237772be87b2218 2024-11-22T03:37:17,444 INFO [M:0;b458937b0f5f:42831 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8e13e9c40f4b47139237772be87b2218 2024-11-22T03:37:17,454 INFO [RS:0;b458937b0f5f:39625 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:37:17,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:17,454 INFO [RS:0;b458937b0f5f:39625 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,39625,1732246592490; zookeeper connection closed. 2024-11-22T03:37:17,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39625-0x101609d56d70001, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:17,455 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6be94a43 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6be94a43 2024-11-22T03:37:17,455 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-22T03:37:17,460 DEBUG [M:0;b458937b0f5f:42831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57d23e7cc3f94833a3c9a16318567695 is 69, key is b458937b0f5f,39625,1732246592490/rs:state/1732246592961/Put/seqid=0 2024-11-22T03:37:17,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741905_1091 (size=5224) 2024-11-22T03:37:17,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741905_1091 (size=5224) 2024-11-22T03:37:17,466 INFO [M:0;b458937b0f5f:42831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57d23e7cc3f94833a3c9a16318567695 2024-11-22T03:37:17,487 DEBUG [M:0;b458937b0f5f:42831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a77667a3cdb141db94667d9d5d1f312a is 52, key is load_balancer_on/state:d/1732246593729/Put/seqid=0 2024-11-22T03:37:17,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741906_1092 (size=5056) 2024-11-22T03:37:17,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741906_1092 (size=5056) 2024-11-22T03:37:17,492 INFO [M:0;b458937b0f5f:42831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a77667a3cdb141db94667d9d5d1f312a 2024-11-22T03:37:17,499 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10972e3cb76a496d84a4254da3add748 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10972e3cb76a496d84a4254da3add748 2024-11-22T03:37:17,505 INFO [M:0;b458937b0f5f:42831 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10972e3cb76a496d84a4254da3add748, entries=8, sequenceid=60, filesize=5.5 K 2024-11-22T03:37:17,507 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8e13e9c40f4b47139237772be87b2218 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8e13e9c40f4b47139237772be87b2218 2024-11-22T03:37:17,513 INFO [M:0;b458937b0f5f:42831 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8e13e9c40f4b47139237772be87b2218 2024-11-22T03:37:17,513 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8e13e9c40f4b47139237772be87b2218, entries=6, sequenceid=60, filesize=6.1 K 2024-11-22T03:37:17,514 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57d23e7cc3f94833a3c9a16318567695 as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/57d23e7cc3f94833a3c9a16318567695 2024-11-22T03:37:17,521 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/57d23e7cc3f94833a3c9a16318567695, entries=2, sequenceid=60, filesize=5.1 K 2024-11-22T03:37:17,522 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a77667a3cdb141db94667d9d5d1f312a as hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a77667a3cdb141db94667d9d5d1f312a 2024-11-22T03:37:17,529 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a77667a3cdb141db94667d9d5d1f312a, entries=1, sequenceid=60, filesize=4.9 K 2024-11-22T03:37:17,530 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=60, compaction requested=false 2024-11-22T03:37:17,532 INFO [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:37:17,532 DEBUG [M:0;b458937b0f5f:42831 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246637384Disabling compacts and flushes for region at 1732246637384Disabling writes for close at 1732246637384Obtaining lock to block concurrent updates at 1732246637385 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246637385Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1732246637385Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732246637386 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246637386Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246637404 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246637404Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246637416 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246637431 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246637431Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246637444 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246637459 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246637459Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246637472 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246637486 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246637486Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@537b407b: reopening flushed file at 1732246637498 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5baed212: reopening flushed file at 1732246637506 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@651d196b: reopening flushed file at 1732246637513 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e1bcaa3: reopening flushed file at 1732246637521 (+8 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=60, compaction requested=false at 1732246637530 (+9 ms)Writing region close event to WAL at 1732246637532 (+2 ms)Closed at 1732246637532 2024-11-22T03:37:17,532 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,533 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,533 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,533 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,533 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:17,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741889_1072 (size=1045) 2024-11-22T03:37:17,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41553 is added to blk_1073741889_1072 (size=1045) 2024-11-22T03:37:17,717 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:37:17,740 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:17,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:37:17,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:37:17,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:37:17,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:37:18,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:18,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:18,755 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@50864177 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-300744400-172.17.0.3-1732246589693:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46063,null,null]) java.net.ConnectException: Call From b458937b0f5f/172.17.0.3 to localhost:35355 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T03:37:18,888 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/WALs/b458937b0f5f,42831,1732246592320/b458937b0f5f%2C42831%2C1732246592320.1732246592632 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/oldWALs/b458937b0f5f%2C42831%2C1732246592320.1732246592632 2024-11-22T03:37:18,892 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/MasterData/oldWALs/b458937b0f5f%2C42831%2C1732246592320.1732246592632 to hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/oldWALs/b458937b0f5f%2C42831%2C1732246592320.1732246592632$masterlocalwal$ 2024-11-22T03:37:18,893 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:37:18,893 INFO [M:0;b458937b0f5f:42831 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T03:37:18,893 INFO [M:0;b458937b0f5f:42831 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42831 2024-11-22T03:37:18,893 INFO [M:0;b458937b0f5f:42831 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:37:19,012 INFO [M:0;b458937b0f5f:42831 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:37:19,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:19,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42831-0x101609d56d70000, quorum=127.0.0.1:57543, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:19,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f60aba7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:19,015 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bc2fb05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:19,015 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:19,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4091ef21{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:19,016 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a4db74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:19,017 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:19,017 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:37:19,017 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-300744400-172.17.0.3-1732246589693 (Datanode Uuid c941676d-a41a-4732-8e59-96d141820b93) service to localhost/127.0.0.1:33231 2024-11-22T03:37:19,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:19,017 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5346f772 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46063,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35355 , LocalHost:localPort b458937b0f5f/172.17.0.3:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T03:37:19,018 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5346f772 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-300744400-172.17.0.3-1732246589693:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41553,null,null], DatanodeInfoWithStorage[127.0.0.1:46063,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-300744400-172.17.0.3-1732246589693 2024-11-22T03:37:19,018 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5346f772 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46063,null,null]) java.io.IOException: No block pool offer service for bpid=BP-300744400-172.17.0.3-1732246589693 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:19,018 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5346f772 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41553,null,null]) java.io.IOException: No block pool offer service for bpid=BP-300744400-172.17.0.3-1732246589693 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:19,018 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5346f772 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46063,null,null], DatanodeInfoWithStorage[127.0.0.1:41553,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-300744400-172.17.0.3-1732246589693:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:46063,null,null], DatanodeInfoWithStorage[127.0.0.1:41553,null,null]] 2024-11-22T03:37:19,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data3/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:19,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data4/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:19,020 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:19,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@106410c2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:19,022 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@714a0d6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:19,022 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:19,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@668b2d84{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:19,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@746d5659{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:19,025 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:19,025 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:37:19,025 WARN [BP-300744400-172.17.0.3-1732246589693 heartbeating to localhost/127.0.0.1:33231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-300744400-172.17.0.3-1732246589693 (Datanode Uuid d7569e07-f53e-44f6-ac1a-4b4f451d2c43) service to localhost/127.0.0.1:33231 2024-11-22T03:37:19,025 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:19,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data5/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:19,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/cluster_9bfb6e0e-f2ee-1741-f67e-14701fd784c6/data/data6/current/BP-300744400-172.17.0.3-1732246589693 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:19,027 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:19,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@657d9fe5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:37:19,035 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@77a48b4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:19,035 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:19,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c23a77f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:19,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28c5c44c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:19,043 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:37:19,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:37:19,091 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 80) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:33231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33231 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:33231 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41987 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f83e0bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33231 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:33231 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:41987 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:33231 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:33231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:33231 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33231 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f83e0bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33231 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 407) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=274 (was 207) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6323 (was 7273) 2024-11-22T03:37:19,099 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=274, ProcessCount=11, AvailableMemoryMB=6323 2024-11-22T03:37:19,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:37:19,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.log.dir so I do NOT create it in target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf 2024-11-22T03:37:19,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bf9e6df-e6f1-d373-b417-3d3dc909337f/hadoop.tmp.dir so I do NOT create it in target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf 2024-11-22T03:37:19,099 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa, deleteOnExit=true 2024-11-22T03:37:19,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/test.cache.data in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/mapreduce.cluster.local.dir in system properties and HBase conf 
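The ResourceChecker summary and the HBaseTestingUtil records above describe the fixture for regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart: a mini cluster with one master, one region server, two data nodes and one ZooKeeper server, plus before/after counts of threads and file descriptors for each test. The sketch below shows roughly how a test requests that topology; it assumes StartMiniClusterOption.builder() exposes setters named after the option fields printed in the log, and the surrounding class is illustrative, not part of HBase.

    // Sketch only: approximates the mini-cluster topology reported in the log above.
    // Assumption: StartMiniClusterOption.builder() has numMasters/numRegionServers/
    // numDataNodes/numZkServers setters matching the StartMiniClusterOption{...} fields.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up DFS, ZooKeeper, master and region server
        try {
          // a WAL-rolling test body would restart the write pipeline here
        } finally {
          util.shutdownMiniCluster();    // releases the threads/descriptors ResourceChecker tracks
        }
      }
    }
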
2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:37:19,100 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:37:19,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:37:19,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:37:19,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:37:19,114 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:37:19,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:19,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:19,441 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:19,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:19,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:19,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:19,451 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:19,453 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:19,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e024519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:19,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0a81cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:19,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35a3721f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-44051-hadoop-hdfs-3_4_1-tests_jar-_-any-12530650361177295689/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:37:19,560 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4946acec{HTTP/1.1, (http/1.1)}{localhost:44051} 2024-11-22T03:37:19,560 INFO [Time-limited test {}] server.Server(415): Started @158977ms 2024-11-22T03:37:19,572 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:37:19,841 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:19,845 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:19,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:19,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:19,849 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:19,849 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72003751{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:19,850 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c88640d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:19,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c9cf8e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-40259-hadoop-hdfs-3_4_1-tests_jar-_-any-16643336240590255070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:19,953 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a543eba{HTTP/1.1, (http/1.1)}{localhost:40259} 2024-11-22T03:37:19,953 INFO [Time-limited test {}] server.Server(415): Started @159371ms 2024-11-22T03:37:19,954 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:19,982 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:19,986 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:19,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:19,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:19,987 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:37:19,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4176bee4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:19,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fa19949{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:20,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c6bf72b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-35927-hadoop-hdfs-3_4_1-tests_jar-_-any-7645503279119840060/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:20,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a0b4054{HTTP/1.1, (http/1.1)}{localhost:35927} 2024-11-22T03:37:20,091 INFO [Time-limited test {}] server.Server(415): Started @159509ms 2024-11-22T03:37:20,092 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:20,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:20,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:21,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:21,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:21,167 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data1/current/BP-1619542008-172.17.0.3-1732246639125/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:21,167 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data2/current/BP-1619542008-172.17.0.3-1732246639125/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:21,187 WARN [Thread-1164 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:37:21,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29c9bbcba16f08cf with lease ID 0x6e168799b2bee361: Processing first storage report for DS-bf082d06-202c-45c4-97f0-078cf3beb7a4 from datanode DatanodeRegistration(127.0.0.1:46577, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=38131, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125) 2024-11-22T03:37:21,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29c9bbcba16f08cf with lease ID 0x6e168799b2bee361: from storage DS-bf082d06-202c-45c4-97f0-078cf3beb7a4 node DatanodeRegistration(127.0.0.1:46577, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=38131, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:21,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29c9bbcba16f08cf with lease ID 0x6e168799b2bee361: Processing first storage report for DS-93a76568-9857-4ed5-8600-d7e1b0c2d0cd from datanode DatanodeRegistration(127.0.0.1:46577, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=38131, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125) 2024-11-22T03:37:21,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29c9bbcba16f08cf with lease ID 0x6e168799b2bee361: from storage DS-93a76568-9857-4ed5-8600-d7e1b0c2d0cd node DatanodeRegistration(127.0.0.1:46577, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=38131, infoSecurePort=0, ipcPort=43375, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:21,296 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data3/current/BP-1619542008-172.17.0.3-1732246639125/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:21,296 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data4/current/BP-1619542008-172.17.0.3-1732246639125/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:21,315 WARN [Thread-1187 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:37:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24fa8a9a4502b988 with lease ID 0x6e168799b2bee362: Processing first storage report for DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb from datanode DatanodeRegistration(127.0.0.1:36237, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=44749, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125) 2024-11-22T03:37:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24fa8a9a4502b988 with lease ID 0x6e168799b2bee362: from storage DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb node DatanodeRegistration(127.0.0.1:36237, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=44749, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24fa8a9a4502b988 with lease ID 0x6e168799b2bee362: Processing first storage report for DS-012c31a7-2fe7-4d53-94f0-f75cb1f72d59 from datanode DatanodeRegistration(127.0.0.1:36237, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=44749, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125) 2024-11-22T03:37:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24fa8a9a4502b988 with lease ID 0x6e168799b2bee362: from storage DS-012c31a7-2fe7-4d53-94f0-f75cb1f72d59 node DatanodeRegistration(127.0.0.1:36237, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=44749, infoSecurePort=0, ipcPort=39671, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:21,339 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf 2024-11-22T03:37:21,342 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/zookeeper_0, clientPort=64530, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:37:21,343 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64530 2024-11-22T03:37:21,343 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,345 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:37:21,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:37:21,356 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f with version=8 2024-11-22T03:37:21,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:37:21,358 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:37:21,358 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:37:21,359 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41217 2024-11-22T03:37:21,361 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41217 connecting to ZooKeeper ensemble=127.0.0.1:64530 2024-11-22T03:37:21,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412170x0, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:37:21,420 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41217-0x101609e16690000 connected 2024-11-22T03:37:21,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,504 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:21,507 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f, hbase.cluster.distributed=false 2024-11-22T03:37:21,509 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:37:21,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41217 2024-11-22T03:37:21,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41217 2024-11-22T03:37:21,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41217 2024-11-22T03:37:21,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41217 2024-11-22T03:37:21,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41217 2024-11-22T03:37:21,529 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:37:21,529 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:37:21,530 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36003 2024-11-22T03:37:21,532 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36003 connecting to ZooKeeper ensemble=127.0.0.1:64530 2024-11-22T03:37:21,532 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,534 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,544 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360030x0, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:37:21,544 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360030x0, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:21,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36003-0x101609e16690001 connected 2024-11-22T03:37:21,544 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:37:21,545 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:37:21,545 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:37:21,546 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:37:21,547 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36003 2024-11-22T03:37:21,547 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36003 2024-11-22T03:37:21,547 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36003 2024-11-22T03:37:21,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36003 2024-11-22T03:37:21,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36003 2024-11-22T03:37:21,562 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:41217 2024-11-22T03:37:21,562 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:21,575 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:21,576 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,585 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:37:21,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,586 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,586 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:37:21,587 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,41217,1732246641358 from backup master directory 2024-11-22T03:37:21,596 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:21,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:21,596 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
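The ZKWatcher/ZKUtil lines above show the watch-and-notify pattern used for master failover: a watch is registered with exists() even when the znode is not there yet, and NodeCreated/NodeDeleted events later fire on paths such as /hbase/master and /hbase/backup-masters. Below is a minimal sketch of that pattern using the plain ZooKeeper client (it is not HBase's ZKWatcher); the quorum string and znode path are copied from the log and will differ per run.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal watch-and-react sketch; plain ZooKeeper client, not HBase's ZKWatcher.
public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64530", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // exists() registers the watch even if the znode is missing, which is what the
    // "Set watcher on znode that does not yet exist" lines in the log refer to.
    zk.exists("/hbase/master", event -> {
      switch (event.getType()) {
        case NodeCreated:   // an active master registered itself
        case NodeDeleted:   // the active master went away
          System.out.println("ZK event " + event.getType() + " on " + event.getPath());
          break;
        default:            // NodeChildrenChanged events come from getChildren() watches instead
          break;
      }
    });
    Thread.sleep(60_000);   // keep the session open long enough to observe events
    zk.close();
  }
}
```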
2024-11-22T03:37:21,596 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,602 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/hbase.id] with ID: 4d1a5cbd-d5a1-4114-ac36-4a084a9e9d70 2024-11-22T03:37:21,602 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/.tmp/hbase.id 2024-11-22T03:37:21,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:37:21,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:37:21,611 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/.tmp/hbase.id]:[hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/hbase.id] 2024-11-22T03:37:21,623 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:21,624 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:37:21,626 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
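The FSUtils lines just above describe how the cluster ID file is published: the ID is written under a .tmp location first and then moved to its target path. Below is a generic Hadoop FileSystem sketch of that write-then-rename pattern; the namenode address and directory names are taken from the log, and the plain UUID content is only illustrative (the real hbase.id stores a serialized ClusterId, not plain text).

```java
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write under .tmp, then rename into place" pattern shown above.
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34053"), conf);

    Path rootDir = new Path("/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f");
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write the content to a temporary file first so readers never see a partial file.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // The rename is the commit point: within one HDFS filesystem it is atomic, so the
    // id file either appears complete at the final path or not at all.
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("Failed to move " + tmpId + " to " + finalId);
    }
  }
}
```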
2024-11-22T03:37:21,638 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:37:21,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:37:21,648 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:37:21,649 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:37:21,649 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:37:21,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:37:21,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:37:21,657 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store 2024-11-22T03:37:21,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:37:21,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:37:21,666 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:21,666 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:37:21,666 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:21,666 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:21,666 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:37:21,666 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:21,667 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
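The descriptor dumped above for 'master:store' (families info, proc, rs and state with their VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE settings) belongs to an internal region that users never create directly, but the same attributes map onto the public builder API. The sketch below expresses the 'info' and 'proc' families for a hypothetical user table named "example"; it illustrates the builder calls only, not how the master builds its local store.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: the same column-family attributes as the log's 'info' and 'proc' families,
// expressed with the public descriptor builders for an ordinary (hypothetical) table.
public class StoreLikeDescriptorSketch {
  static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                  // VERSIONS => '3'
        .setInMemory(true)                                  // IN_MEMORY => 'true'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)               // BLOOMFILTER => 'ROWCOL'
        .setBlocksize(8 * 1024)                             // BLOCKSIZE => 8 KB
        .build();
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)                                  // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)                  // BLOOMFILTER => 'ROW'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))           // hypothetical table name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```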
2024-11-22T03:37:21,667 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246641666Disabling compacts and flushes for region at 1732246641666Disabling writes for close at 1732246641666Writing region close event to WAL at 1732246641667 (+1 ms)Closed at 1732246641667 2024-11-22T03:37:21,667 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/.initializing 2024-11-22T03:37:21,668 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,670 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C41217%2C1732246641358, suffix=, logDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358, archiveDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/oldWALs, maxLogs=10 2024-11-22T03:37:21,671 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C41217%2C1732246641358.1732246641671 2024-11-22T03:37:21,676 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 2024-11-22T03:37:21,685 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44749:44749),(127.0.0.1/127.0.0.1:38131:38131)] 2024-11-22T03:37:21,686 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:37:21,686 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:21,686 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,686 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,688 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:37:21,689 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:21,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:21,690 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:37:21,691 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:21,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:21,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:37:21,693 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:21,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:21,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:37:21,695 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:21,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:21,696 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,697 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,697 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,699 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,699 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,699 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:37:21,700 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:21,705 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:37:21,705 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794618, jitterRate=0.010410204529762268}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:37:21,706 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246641686Initializing all the Stores at 1732246641687 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246641687Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246641687Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246641687Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246641687Cleaning up temporary data from old regions at 1732246641699 (+12 ms)Region opened successfully at 1732246641706 (+7 ms) 2024-11-22T03:37:21,706 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:37:21,710 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2535fc9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:37:21,711 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:37:21,711 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:37:21,711 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:37:21,711 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:37:21,712 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:37:21,712 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:37:21,712 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:37:21,715 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:37:21,715 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:37:21,775 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:37:21,775 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:37:21,776 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:37:21,796 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:37:21,797 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:37:21,798 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:37:21,806 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:37:21,808 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:37:21,817 DEBUG 
[master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:37:21,820 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:37:21,827 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:37:21,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:21,838 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:21,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,838 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,839 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,41217,1732246641358, sessionid=0x101609e16690000, setting cluster-up flag (Was=false) 2024-11-22T03:37:21,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,859 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,891 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:37:21,892 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,912 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:21,943 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:37:21,945 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,41217,1732246641358 2024-11-22T03:37:21,946 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:37:21,948 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:21,948 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:37:21,948 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:37:21,949 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,41217,1732246641358 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:37:21,950 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(746): ClusterId : 4d1a5cbd-d5a1-4114-ac36-4a084a9e9d70 2024-11-22T03:37:21,950 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:37:21,950 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:21,950 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:21,950 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:21,950 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:21,951 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:37:21,951 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:21,951 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:37:21,951 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246671952 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:37:21,952 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:37:21,953 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:21,953 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:21,953 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:37:21,953 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:37:21,953 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:37:21,953 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:37:21,954 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:37:21,954 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:37:21,954 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246641954,5,FailOnTimeoutGroup] 2024-11-22T03:37:21,954 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246641954,5,FailOnTimeoutGroup] 2024-11-22T03:37:21,954 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, 
period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:21,954 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:37:21,954 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:21,954 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:21,954 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:21,955 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:37:21,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:37:21,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:37:21,962 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:37:21,963 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f 2024-11-22T03:37:21,963 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:37:21,963 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:37:21,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:37:21,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:37:21,976 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:37:21,976 DEBUG [RS:0;b458937b0f5f:36003 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14cd1f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:37:21,990 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:36003 2024-11-22T03:37:21,990 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:37:21,990 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:37:21,990 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(832): About to register with Master. 
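The hbase:meta descriptor created above carries a coprocessor attribute (coprocessor$1 => MultiRowMutationEndpoint at priority 536870911), and the region server lines note that system and table coprocessor loading are enabled. For an ordinary table the same kind of attribute is attached through the builder, as sketched below; the table and family names are hypothetical, while the endpoint class name is the one from the log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch: attaching a coprocessor endpoint to a (hypothetical) user table descriptor.
public class CoprocessorAttributeSketch {
  static TableDescriptor withEndpoint() throws Exception {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Class name copied from the meta descriptor dumped in the log above.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```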
2024-11-22T03:37:21,991 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,41217,1732246641358 with port=36003, startcode=1732246641529 2024-11-22T03:37:21,991 DEBUG [RS:0;b458937b0f5f:36003 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:37:21,994 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:37:21,994 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41217 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,36003,1732246641529 2024-11-22T03:37:21,995 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41217 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,36003,1732246641529 2024-11-22T03:37:21,996 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f 2024-11-22T03:37:21,997 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34053 2024-11-22T03:37:21,997 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:37:22,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:37:22,007 DEBUG [RS:0;b458937b0f5f:36003 {}] zookeeper.ZKUtil(111): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,007 WARN [RS:0;b458937b0f5f:36003 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:37:22,007 INFO [RS:0;b458937b0f5f:36003 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:37:22,007 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,007 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,36003,1732246641529] 2024-11-22T03:37:22,011 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:37:22,013 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:37:22,013 INFO [RS:0;b458937b0f5f:36003 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:37:22,013 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
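The MemStoreFlusher line (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController line (100 MB/s upper bound, 50 MB/s lower bound) are both derived from configuration rather than hard-coded. The sketch below sets the keys I believe control them, as shipped in hbase-default.xml; treat the key names as assumptions and verify them against the HBase version actually in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Rough sketch of where the numbers in the MemStoreFlusher and
// PressureAwareCompactionThroughputController log lines come from.
public class FlushAndCompactionTuningSketch {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores (880 MB above),
    // and the lower watermark at which blocking flushes back off (836 MB above).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds used by the pressure-aware controller,
    // in bytes per second (100 MB/s and 50 MB/s in the log).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
```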
2024-11-22T03:37:22,013 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:37:22,014 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:37:22,014 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,014 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,014 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,014 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,014 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,014 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:37:22,015 DEBUG [RS:0;b458937b0f5f:36003 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:37:22,015 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
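The repeated "Chore ScheduledChore name=..., period=..., is enabled" lines come from HBase's ChoreService, which runs periodic maintenance tasks such as the CompactionChecker (every 1000 ms here). Below is a hedged sketch of that pattern using the same internal ChoreService/ScheduledChore classes; the chore name and period are made up, and the anonymous Stoppable stands in for the region server, which normally plays that role.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Sketch of the chore pattern behind the ChoreService(168) lines above.
public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Runs every 1000 ms, like the CompactionChecker chore in the log.
    service.scheduleChore(new ScheduledChore("DemoChecker", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic check");
      }
    });
    Thread.sleep(5_000);
    stopper.stop("demo done");
    service.shutdown();
  }
}
```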
2024-11-22T03:37:22,015 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,016 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,016 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,016 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,016 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36003,1732246641529-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:37:22,032 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:37:22,032 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,36003,1732246641529-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,033 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,033 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.Replication(171): b458937b0f5f,36003,1732246641529 started 2024-11-22T03:37:22,049 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,049 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,36003,1732246641529, RpcServer on b458937b0f5f/172.17.0.3:36003, sessionid=0x101609e16690001 2024-11-22T03:37:22,049 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:37:22,049 DEBUG [RS:0;b458937b0f5f:36003 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,049 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,36003,1732246641529' 2024-11-22T03:37:22,049 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:37:22,050 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:37:22,050 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:37:22,050 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:37:22,050 DEBUG [RS:0;b458937b0f5f:36003 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,051 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,36003,1732246641529' 2024-11-22T03:37:22,051 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:37:22,051 DEBUG 
[RS:0;b458937b0f5f:36003 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:37:22,051 DEBUG [RS:0;b458937b0f5f:36003 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:37:22,051 INFO [RS:0;b458937b0f5f:36003 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:37:22,051 INFO [RS:0;b458937b0f5f:36003 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:37:22,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:22,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:22,154 INFO [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C36003%2C1732246641529, suffix=, logDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529, archiveDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs, maxLogs=32 2024-11-22T03:37:22,155 INFO [RS:0;b458937b0f5f:36003 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:22,166 INFO [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:22,166 DEBUG [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44749:44749),(127.0.0.1/127.0.0.1:38131:38131)] 2024-11-22T03:37:22,371 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:22,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:37:22,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:37:22,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:37:22,377 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:37:22,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:37:22,379 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:37:22,379 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:37:22,382 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:37:22,382 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:37:22,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740 2024-11-22T03:37:22,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740 2024-11-22T03:37:22,386 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:37:22,386 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:37:22,387 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T03:37:22,388 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:37:22,391 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:37:22,391 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816589, jitterRate=0.03834697604179382}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:37:22,392 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246642371Initializing all the Stores at 1732246642372 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246642372Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246642372Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246642372Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246642372Cleaning up temporary data from old regions at 1732246642386 (+14 ms)Region opened successfully at 1732246642392 (+6 ms) 2024-11-22T03:37:22,392 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:37:22,392 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:37:22,392 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:37:22,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-22T03:37:22,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:37:22,393 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:37:22,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246642392Disabling compacts and flushes for region at 1732246642392Disabling writes for close at 1732246642393 (+1 ms)Writing 
region close event to WAL at 1732246642393Closed at 1732246642393 2024-11-22T03:37:22,395 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:22,395 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:37:22,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:37:22,396 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:37:22,398 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:37:22,548 DEBUG [b458937b0f5f:41217 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:37:22,549 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,550 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,36003,1732246641529, state=OPENING 2024-11-22T03:37:22,605 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:37:22,617 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:22,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:22,618 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:37:22,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:37:22,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:37:22,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,36003,1732246641529}] 2024-11-22T03:37:22,772 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:37:22,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59043, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:37:22,780 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:37:22,780 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:37:22,783 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C36003%2C1732246641529.meta, suffix=.meta, logDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529, archiveDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs, maxLogs=32 2024-11-22T03:37:22,784 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta 2024-11-22T03:37:22,790 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta 2024-11-22T03:37:22,796 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38131:38131),(127.0.0.1/127.0.0.1:44749:44749)] 2024-11-22T03:37:22,797 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:37:22,797 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:37:22,797 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:37:22,798 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T03:37:22,798 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:37:22,798 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:22,798 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:37:22,798 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:37:22,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:37:22,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:37:22,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:37:22,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:37:22,801 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:37:22,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:37:22,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:22,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:37:22,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:37:22,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T03:37:22,805 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:37:22,805 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740 2024-11-22T03:37:22,807 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740 2024-11-22T03:37:22,808 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:37:22,808 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:37:22,809 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:37:22,810 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:37:22,811 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783440, jitterRate=-0.003805503249168396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:37:22,811 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:37:22,812 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246642798Writing region info on filesystem at 1732246642798Initializing all the Stores at 1732246642799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246642799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246642799Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246642799Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246642799Cleaning up temporary data from old regions at 1732246642808 (+9 ms)Running coprocessor post-open hooks at 1732246642811 (+3 ms)Region opened successfully at 1732246642812 (+1 ms) 2024-11-22T03:37:22,813 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246642771 2024-11-22T03:37:22,816 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:37:22,816 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:37:22,817 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,818 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,36003,1732246641529, state=OPEN 2024-11-22T03:37:22,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:37:22,854 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:37:22,854 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,36003,1732246641529 2024-11-22T03:37:22,854 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:37:22,854 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:37:22,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:37:22,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,36003,1732246641529 in 236 msec 2024-11-22T03:37:22,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:37:22,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 463 msec 2024-11-22T03:37:22,862 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:22,862 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:37:22,864 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:37:22,864 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,36003,1732246641529, seqNum=-1] 2024-11-22T03:37:22,865 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:37:22,867 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43721, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:37:22,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 925 msec 2024-11-22T03:37:22,874 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246642874, completionTime=-1 2024-11-22T03:37:22,874 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:37:22,874 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:37:22,876 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:37:22,876 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246702876 2024-11-22T03:37:22,876 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246762876 2024-11-22T03:37:22,877 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:37:22,877 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,41217,1732246641358-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,877 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,41217,1732246641358-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,877 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,41217,1732246641358-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,877 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:41217, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:37:22,877 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,878 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:22,880 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.287sec 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,41217,1732246641358-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:37:22,883 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,41217,1732246641358-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:37:22,886 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:37:22,887 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:37:22,887 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,41217,1732246641358-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:37:22,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5904db80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:37:22,951 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,41217,-1 for getting cluster id 2024-11-22T03:37:22,951 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:37:22,953 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4d1a5cbd-d5a1-4114-ac36-4a084a9e9d70' 2024-11-22T03:37:22,953 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:37:22,953 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4d1a5cbd-d5a1-4114-ac36-4a084a9e9d70" 2024-11-22T03:37:22,954 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d85c27e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:37:22,954 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,41217,-1] 2024-11-22T03:37:22,954 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:37:22,954 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:22,956 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:37:22,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7889bd43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:37:22,958 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:37:22,959 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,36003,1732246641529, seqNum=-1] 2024-11-22T03:37:22,959 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:37:22,961 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58696, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:37:22,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b458937b0f5f,41217,1732246641358 2024-11-22T03:37:22,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:22,967 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:37:22,967 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-22T03:37:22,967 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-22T03:37:22,967 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:37:22,968 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is b458937b0f5f,41217,1732246641358 2024-11-22T03:37:22,968 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@67d03189 2024-11-22T03:37:22,969 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:37:22,971 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:37:22,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:37:22,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-22T03:37:22,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:37:22,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:37:22,975 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:37:22,975 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:22,975 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-22T03:37:22,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:37:22,977 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:37:22,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741835_1011 (size=395) 2024-11-22T03:37:22,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741835_1011 (size=395) 2024-11-22T03:37:22,988 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f7611e418df107e3e0c5768c0e301545, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f 2024-11-22T03:37:22,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46577 is added to blk_1073741836_1012 (size=78) 2024-11-22T03:37:22,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741836_1012 (size=78) 2024-11-22T03:37:22,995 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:22,995 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing f7611e418df107e3e0c5768c0e301545, disabling compactions & flushes 2024-11-22T03:37:22,995 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:22,995 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:22,995 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. after waiting 0 ms 2024-11-22T03:37:22,995 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:22,995 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:22,996 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for f7611e418df107e3e0c5768c0e301545: Waiting for close lock at 1732246642995Disabling compacts and flushes for region at 1732246642995Disabling writes for close at 1732246642995Writing region close event to WAL at 1732246642995Closed at 1732246642995 2024-11-22T03:37:22,997 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:37:22,998 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732246642997"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246642997"}]},"ts":"1732246642997"} 2024-11-22T03:37:23,000 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T03:37:23,002 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:37:23,002 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246643002"}]},"ts":"1732246643002"} 2024-11-22T03:37:23,004 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-22T03:37:23,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f7611e418df107e3e0c5768c0e301545, ASSIGN}] 2024-11-22T03:37:23,006 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f7611e418df107e3e0c5768c0e301545, ASSIGN 2024-11-22T03:37:23,007 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f7611e418df107e3e0c5768c0e301545, ASSIGN; state=OFFLINE, location=b458937b0f5f,36003,1732246641529; forceNewPlan=false, retain=false 2024-11-22T03:37:23,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:23,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:23,158 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f7611e418df107e3e0c5768c0e301545, regionState=OPENING, regionLocation=b458937b0f5f,36003,1732246641529 2024-11-22T03:37:23,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f7611e418df107e3e0c5768c0e301545, ASSIGN because future has completed 2024-11-22T03:37:23,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7611e418df107e3e0c5768c0e301545, server=b458937b0f5f,36003,1732246641529}] 2024-11-22T03:37:23,322 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:23,322 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f7611e418df107e3e0c5768c0e301545, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:37:23,323 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,323 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:23,323 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,323 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,325 INFO [StoreOpener-f7611e418df107e3e0c5768c0e301545-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,326 INFO [StoreOpener-f7611e418df107e3e0c5768c0e301545-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7611e418df107e3e0c5768c0e301545 columnFamilyName info 2024-11-22T03:37:23,326 DEBUG [StoreOpener-f7611e418df107e3e0c5768c0e301545-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:23,327 INFO [StoreOpener-f7611e418df107e3e0c5768c0e301545-1 {}] regionserver.HStore(327): Store=f7611e418df107e3e0c5768c0e301545/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:23,327 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,328 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,328 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,329 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,329 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,331 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,333 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:37:23,333 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f7611e418df107e3e0c5768c0e301545; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801250, jitterRate=0.018842697143554688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:37:23,334 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:23,334 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f7611e418df107e3e0c5768c0e301545: Running coprocessor pre-open hook at 1732246643323Writing region info on filesystem at 1732246643323Initializing all the Stores at 1732246643324 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246643324Cleaning up temporary data from old regions at 1732246643329 (+5 ms)Running coprocessor post-open hooks at 1732246643334 (+5 ms)Region opened successfully at 1732246643334 2024-11-22T03:37:23,336 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545., pid=6, masterSystemTime=1732246643317 2024-11-22T03:37:23,338 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:23,338 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:23,339 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f7611e418df107e3e0c5768c0e301545, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,36003,1732246641529 2024-11-22T03:37:23,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7611e418df107e3e0c5768c0e301545, server=b458937b0f5f,36003,1732246641529 because future has completed 2024-11-22T03:37:23,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:37:23,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f7611e418df107e3e0c5768c0e301545, server=b458937b0f5f,36003,1732246641529 in 180 msec 2024-11-22T03:37:23,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:37:23,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f7611e418df107e3e0c5768c0e301545, ASSIGN in 340 msec 2024-11-22T03:37:23,349 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:37:23,349 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246643349"}]},"ts":"1732246643349"} 2024-11-22T03:37:23,351 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-22T03:37:23,352 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:37:23,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 381 msec 2024-11-22T03:37:23,394 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:37:23,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:23,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:24,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:24,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:25,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:25,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:26,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:26,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:27,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:27,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:28,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:28,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:28,301 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:37:28,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:28,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:37:28,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:37:28,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:37:28,341 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-22T03:37:28,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:37:28,341 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:37:28,342 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:37:28,342 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T03:37:28,343 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:37:28,343 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-22T03:37:29,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:29,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:30,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:30,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:31,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:31,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:32,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:32,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:37:33,084 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-22T03:37:33,084 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-22T03:37:33,088 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:37:33,089 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:33,094 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545., hostname=b458937b0f5f,36003,1732246641529, seqNum=2] 2024-11-22T03:37:33,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:33,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:34,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:34,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:35,098 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:35,099 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:35,099 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:35,099 WARN [DataStreamer for file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 block BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK], DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]) is bad. 2024-11-22T03:37:35,099 WARN [DataStreamer for file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 block BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK], DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]) is bad. 
2024-11-22T03:37:35,099 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:35,099 WARN [DataStreamer for file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta block BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK], DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36237,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]) is bad. 2024-11-22T03:37:35,099 WARN [PacketResponder: BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36237] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:33188 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33188 dst: /127.0.0.1:46577 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:35530 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35530 dst: /127.0.0.1:36237 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:35526 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35526 dst: /127.0.0.1:36237 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:33184 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33184 dst: /127.0.0.1:46577 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1058929704_22 at /127.0.0.1:43524 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43524 dst: /127.0.0.1:46577 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1058929704_22 at /127.0.0.1:47642 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47642 dst: /127.0.0.1:36237 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:35,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:35,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c6bf72b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:35,186 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a0b4054{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:35,186 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:35,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fa19949{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:35,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4176bee4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:35,188 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:35,189 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:37:35,189 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1619542008-172.17.0.3-1732246639125 (Datanode Uuid 167308fd-428f-4fb9-b897-8bc78e712518) service to localhost/127.0.0.1:34053 2024-11-22T03:37:35,189 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:35,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data3/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:35,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data4/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:35,190 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:35,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:35,213 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:35,219 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:35,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:35,220 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:35,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76c12008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:35,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4933c3c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:35,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d253c27{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-37991-hadoop-hdfs-3_4_1-tests_jar-_-any-5633767750132587924/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:35,349 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5416c192{HTTP/1.1, (http/1.1)}{localhost:37991} 2024-11-22T03:37:35,349 INFO [Time-limited test {}] server.Server(415): Started @174766ms 2024-11-22T03:37:35,350 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:35,386 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:35,386 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:35,386 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:35,387 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1058929704_22 at /127.0.0.1:53344 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53344 dst: /127.0.0.1:46577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:37:35,387 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:53368 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53368 dst: /127.0.0.1:46577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,387 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:53352 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53352 dst: /127.0.0.1:46577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:35,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c9cf8e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:35,389 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a543eba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:35,389 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:35,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c88640d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:35,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72003751{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:35,390 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:35,390 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:37:35,390 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1619542008-172.17.0.3-1732246639125 (Datanode Uuid 5f35e234-81b4-4577-86f5-8bb75b7b7867) service to localhost/127.0.0.1:34053 2024-11-22T03:37:35,390 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:35,390 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data1/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:35,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data2/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:35,391 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:35,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:35,409 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:35,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:35,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:35,409 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:37:35,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ab774b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:35,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38b4bdfd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:35,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2646398a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-44111-hadoop-hdfs-3_4_1-tests_jar-_-any-6210152851812894517/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:35,523 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@295e1e6a{HTTP/1.1, 
(http/1.1)}{localhost:44111} 2024-11-22T03:37:35,523 INFO [Time-limited test {}] server.Server(415): Started @174941ms 2024-11-22T03:37:35,525 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:36,020 WARN [Thread-1336 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:37:36,023 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb437ba69b9f703 with lease ID 0x6e168799b2bee363: from storage DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb node DatanodeRegistration(127.0.0.1:41833, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=33021, infoSecurePort=0, ipcPort=45129, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:36,023 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb437ba69b9f703 with lease ID 0x6e168799b2bee363: from storage DS-012c31a7-2fe7-4d53-94f0-f75cb1f72d59 node DatanodeRegistration(127.0.0.1:41833, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=33021, infoSecurePort=0, ipcPort=45129, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:36,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:36,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:36,185 WARN [Thread-1356 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:37:36,187 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4660c47c59737bea with lease ID 0x6e168799b2bee364: from storage DS-bf082d06-202c-45c4-97f0-078cf3beb7a4 node DatanodeRegistration(127.0.0.1:37655, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=34437, infoSecurePort=0, ipcPort=37417, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:36,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4660c47c59737bea with lease ID 0x6e168799b2bee364: from storage DS-93a76568-9857-4ed5-8600-d7e1b0c2d0cd node DatanodeRegistration(127.0.0.1:37655, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=34437, infoSecurePort=0, ipcPort=37417, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:36,550 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-22T03:37:36,552 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-22T03:37:36,554 ERROR [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:36,554 WARN [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:36,555 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C36003%2C1732246641529:(num 1732246642155) roll requested 2024-11-22T03:37:36,555 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:36,570 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 newFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:36,571 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:36,571 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:36,571 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:36,571 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:36,571 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:36,571 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:36,572 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:36,572 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:36,572 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:36,573 WARN [IPC Server handler 0 on default port 34053 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-22T03:37:36,573 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 after 1ms 2024-11-22T03:37:36,586 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33021:33021),(127.0.0.1/127.0.0.1:34437:34437)] 2024-11-22T03:37:36,586 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 is not closed yet, will try archiving it next time 2024-11-22T03:37:37,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:37,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:38,023 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T03:37:38,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:38,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:38,590 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-22T03:37:39,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:39,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:40,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:40,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:40,574 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 after 4002ms 2024-11-22T03:37:40,594 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:37655,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:40,595 WARN [DataStreamer for file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 block BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41833,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK], DatanodeInfoWithStorage[127.0.0.1:37655,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37655,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]) is bad. 2024-11-22T03:37:40,595 WARN [PacketResponder: BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37655] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:40,595 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:35818 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41833:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35818 dst: /127.0.0.1:41833 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:40,596 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:58884 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58884 dst: /127.0.0.1:37655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:40,649 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2646398a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:40,649 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@295e1e6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:40,650 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:40,650 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38b4bdfd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:40,650 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ab774b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:40,652 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:37:40,652 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:40,652 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:40,652 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1619542008-172.17.0.3-1732246639125 (Datanode Uuid 5f35e234-81b4-4577-86f5-8bb75b7b7867) service to localhost/127.0.0.1:34053 2024-11-22T03:37:40,653 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data1/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:40,654 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data2/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:40,654 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:40,662 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:40,667 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:40,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:40,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:40,668 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:40,668 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22201455{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:40,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8b56b63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:40,774 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@489d8b52{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-40703-hadoop-hdfs-3_4_1-tests_jar-_-any-5741902832625722878/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:40,774 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@207e03ac{HTTP/1.1, (http/1.1)}{localhost:40703} 2024-11-22T03:37:40,774 INFO [Time-limited test {}] server.Server(415): Started @180192ms 2024-11-22T03:37:40,775 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:40,805 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:40,805 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1301876972_22 at /127.0.0.1:35836 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41833:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35836 dst: /127.0.0.1:41833 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:40,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d253c27{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:40,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5416c192{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:40,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:40,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4933c3c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:40,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76c12008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:40,814 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:40,814 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:37:40,814 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1619542008-172.17.0.3-1732246639125 (Datanode Uuid 167308fd-428f-4fb9-b897-8bc78e712518) service to localhost/127.0.0.1:34053 2024-11-22T03:37:40,814 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:40,815 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data3/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:40,815 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:40,815 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data4/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:40,833 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:40,836 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:40,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:40,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:40,837 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:37:40,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62874ecc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:40,838 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e83a469{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:40,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@785df7ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/java.io.tmpdir/jetty-localhost-45321-hadoop-hdfs-3_4_1-tests_jar-_-any-8980744503920851773/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:40,939 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@67293df{HTTP/1.1, 
(http/1.1)}{localhost:45321} 2024-11-22T03:37:40,939 INFO [Time-limited test {}] server.Server(415): Started @180357ms 2024-11-22T03:37:40,941 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:41,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:41,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:41,330 WARN [Thread-1410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:37:41,332 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cc3826eb6b31770 with lease ID 0x6e168799b2bee365: from storage DS-bf082d06-202c-45c4-97f0-078cf3beb7a4 node DatanodeRegistration(127.0.0.1:43053, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=32865, infoSecurePort=0, ipcPort=35393, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:41,333 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cc3826eb6b31770 with lease ID 0x6e168799b2bee365: from storage DS-93a76568-9857-4ed5-8600-d7e1b0c2d0cd node DatanodeRegistration(127.0.0.1:43053, datanodeUuid=5f35e234-81b4-4577-86f5-8bb75b7b7867, infoPort=32865, infoSecurePort=0, ipcPort=35393, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:41,457 WARN [Thread-1430 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:37:41,460 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6e6ce088a6dd9d8 with lease ID 0x6e168799b2bee366: from storage DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb node DatanodeRegistration(127.0.0.1:41657, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=33091, infoSecurePort=0, ipcPort=44893, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:41,460 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6e6ce088a6dd9d8 with lease ID 0x6e168799b2bee366: from storage DS-012c31a7-2fe7-4d53-94f0-f75cb1f72d59 node DatanodeRegistration(127.0.0.1:41657, datanodeUuid=167308fd-428f-4fb9-b897-8bc78e712518, infoPort=33091, infoSecurePort=0, ipcPort=44893, storageInfo=lv=-57;cid=testClusterID;nsid=1747893720;c=1732246639125), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:41,962 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-22T03:37:41,964 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-22T03:37:41,965 ERROR [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41833,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:41,965 WARN [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41833,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:41,965 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C36003%2C1732246641529:(num 1732246656555) roll requested 2024-11-22T03:37:41,965 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.1732246661965 2024-11-22T03:37:41,972 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 newFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 2024-11-22T03:37:41,972 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:41,972 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:41,972 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:41,973 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:41,973 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:41,973 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 2024-11-22T03:37:41,973 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41833,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:41,973 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41833,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:41,973 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:41,974 WARN [IPC Server handler 3 on default port 34053 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-22T03:37:41,974 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32865:32865),(127.0.0.1/127.0.0.1:33091:33091)] 2024-11-22T03:37:41,974 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 after 1ms 2024-11-22T03:37:41,974 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 is not closed yet, will try archiving it next time 2024-11-22T03:37:42,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:42,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:43,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:43,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:43,976 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:43,987 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 newFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:43,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:43,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:43,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:43,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:43,987 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:43,988 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741838_1019 (size=1264) 2024-11-22T03:37:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741838_1019 (size=1264) 2024-11-22T03:37:43,990 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32865:32865),(127.0.0.1/127.0.0.1:33091:33091)] 2024-11-22T03:37:43,990 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 is not closed yet, will try archiving it next time 2024-11-22T03:37:43,990 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 is not closed yet, will try archiving it next time 2024-11-22T03:37:43,991 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:43,991 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:43,991 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 after 0ms 2024-11-22T03:37:43,991 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:44,002 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732246643334/Put/vlen=218/seqid=0] 2024-11-22T03:37:44,002 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732246653096/Put/vlen=1045/seqid=0] 2024-11-22T03:37:44,003 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246642155 2024-11-22T03:37:44,003 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:44,003 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:44,003 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 after 0ms 2024-11-22T03:37:44,003 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:44,007 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732246656554/Put/vlen=1045/seqid=0] 2024-11-22T03:37:44,007 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732246658592/Put/vlen=1045/seqid=0] 2024-11-22T03:37:44,007 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 2024-11-22T03:37:44,007 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 2024-11-22T03:37:44,007 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 2024-11-22T03:37:44,008 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 after 1ms 2024-11-22T03:37:44,008 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246661965 2024-11-22T03:37:44,011 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732246661965/Put/vlen=1045/seqid=0] 2024-11-22T03:37:44,011 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:44,011 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:44,012 WARN [IPC Server handler 2 on default port 34053 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-22T03:37:44,012 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 after 1ms 2024-11-22T03:37:44,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:44,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:44,463 WARN [ResponseProcessor for block BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:44,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1058929704_22 at /127.0.0.1:33776 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43053:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33776 dst: /127.0.0.1:43053 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43053 remote=/127.0.0.1:33776]. Total timeout mills is 60000, 59524 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:44,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1058929704_22 at /127.0.0.1:52206 [Receiving block BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52206 dst: /127.0.0.1:41657 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:37:44,463 WARN [DataStreamer for file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 block BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43053,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK], DatanodeInfoWithStorage[127.0.0.1:41657,DS-9eb3dfbe-12b4-446a-8c31-b3faf6c99ffb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43053,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]) is bad. 2024-11-22T03:37:44,468 WARN [DataStreamer for file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 block BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:44,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741839_1022 (size=85) 2024-11-22T03:37:45,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:45,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:45,975 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246656555 after 4002ms 2024-11-22T03:37:46,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:46,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:47,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:47,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:47,334 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-22T03:37:48,013 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 after 4002ms 2024-11-22T03:37:48,013 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:48,019 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:48,020 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-22T03:37:48,020 ERROR [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:48,020 WARN [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:48,021 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C36003%2C1732246641529.meta:.meta(num 1732246642784) roll requested 2024-11-22T03:37:48,021 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.meta.1732246668021.meta 2024-11-22T03:37:48,027 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,027 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,027 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,027 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,027 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,027 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246668021.meta 2024-11-22T03:37:48,027 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:48,028 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:48,028 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta 2024-11-22T03:37:48,028 WARN [IPC Server handler 4 on default port 34053 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-22T03:37:48,028 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32865:32865),(127.0.0.1/127.0.0.1:33091:33091)] 2024-11-22T03:37:48,028 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta is not closed yet, will try archiving it next time 2024-11-22T03:37:48,028 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta after 0ms 2024-11-22T03:37:48,045 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/info/d029de296eca41d7ad2bb9ad27f35ac3 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545./info:regioninfo/1732246643339/Put/seqid=0 2024-11-22T03:37:48,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741841_1025 (size=7125) 2024-11-22T03:37:48,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741841_1025 (size=7125) 2024-11-22T03:37:48,051 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/info/d029de296eca41d7ad2bb9ad27f35ac3 2024-11-22T03:37:48,073 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/ns/3e5f4ab6213f46dcacec2e52111b5edf is 43, key is default/ns:d/1732246642867/Put/seqid=0 2024-11-22T03:37:48,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741842_1026 (size=5153) 2024-11-22T03:37:48,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741842_1026 (size=5153) 2024-11-22T03:37:48,079 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/ns/3e5f4ab6213f46dcacec2e52111b5edf 2024-11-22T03:37:48,100 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/table/fc7d7791b63a42f1a439f703fd6d4aeb is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732246643349/Put/seqid=0 2024-11-22T03:37:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741843_1027 (size=5438) 2024-11-22T03:37:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741843_1027 (size=5438) 2024-11-22T03:37:48,106 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/table/fc7d7791b63a42f1a439f703fd6d4aeb 2024-11-22T03:37:48,113 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/info/d029de296eca41d7ad2bb9ad27f35ac3 as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/info/d029de296eca41d7ad2bb9ad27f35ac3 2024-11-22T03:37:48,119 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/info/d029de296eca41d7ad2bb9ad27f35ac3, entries=10, sequenceid=11, filesize=7.0 K 2024-11-22T03:37:48,120 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/ns/3e5f4ab6213f46dcacec2e52111b5edf as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/ns/3e5f4ab6213f46dcacec2e52111b5edf 2024-11-22T03:37:48,127 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/ns/3e5f4ab6213f46dcacec2e52111b5edf, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:37:48,128 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/.tmp/table/fc7d7791b63a42f1a439f703fd6d4aeb as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/table/fc7d7791b63a42f1a439f703fd6d4aeb 2024-11-22T03:37:48,135 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/table/fc7d7791b63a42f1a439f703fd6d4aeb, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T03:37:48,136 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, compaction requested=false 2024-11-22T03:37:48,137 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T03:37:48,137 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f7611e418df107e3e0c5768c0e301545 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-22T03:37:48,137 ERROR [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:48,138 WARN [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f-prefix:b458937b0f5f,36003,1732246641529 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:48,138 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C36003%2C1732246641529:(num 1732246663975) roll requested 2024-11-22T03:37:48,138 INFO [regionserver/b458937b0f5f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C36003%2C1732246641529.1732246668138 2024-11-22T03:37:48,144 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 newFile=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246668138 2024-11-22T03:37:48,144 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,144 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,144 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,145 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,145 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,145 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246668138 2024-11-22T03:37:48,145 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:48,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:48,145 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1619542008-172.17.0.3-1732246639125:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:48,146 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:48,146 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 after 0ms 2024-11-22T03:37:48,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:48,151 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.1732246663975 to hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs/b458937b0f5f%2C36003%2C1732246641529.1732246663975 2024-11-22T03:37:48,151 DEBUG [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32865:32865),(127.0.0.1/127.0.0.1:33091:33091)] 2024-11-22T03:37:48,168 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/.tmp/info/3c061af2c32e43c89c20b3288616b775 is 1080, key is row1002/info:/1732246653096/Put/seqid=0 2024-11-22T03:37:48,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741845_1029 (size=9270) 2024-11-22T03:37:48,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741845_1029 (size=9270) 2024-11-22T03:37:48,174 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/.tmp/info/3c061af2c32e43c89c20b3288616b775 2024-11-22T03:37:48,181 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/.tmp/info/3c061af2c32e43c89c20b3288616b775 as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/info/3c061af2c32e43c89c20b3288616b775 2024-11-22T03:37:48,188 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/info/3c061af2c32e43c89c20b3288616b775, entries=4, sequenceid=8, filesize=9.1 K 2024-11-22T03:37:48,189 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for f7611e418df107e3e0c5768c0e301545 in 52ms, sequenceid=8, compaction requested=false 2024-11-22T03:37:48,189 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f7611e418df107e3e0c5768c0e301545: 2024-11-22T03:37:48,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:37:48,196 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:37:48,196 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:48,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:48,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:48,196 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T03:37:48,196 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:37:48,196 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=298236265, stopped=false 2024-11-22T03:37:48,196 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,41217,1732246641358 2024-11-22T03:37:48,267 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:48,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:48,267 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:48,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:48,267 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:37:48,267 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:37:48,267 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:48,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:48,268 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:48,268 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,36003,1732246641529' ***** 2024-11-22T03:37:48,268 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:37:48,269 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:48,269 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:37:48,269 INFO [RS:0;b458937b0f5f:36003 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:37:48,269 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:37:48,269 INFO [RS:0;b458937b0f5f:36003 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:37:48,270 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(3091): Received CLOSE for f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:48,270 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,36003,1732246641529 2024-11-22T03:37:48,270 INFO [RS:0;b458937b0f5f:36003 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:37:48,270 INFO [RS:0;b458937b0f5f:36003 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:36003. 
2024-11-22T03:37:48,270 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f7611e418df107e3e0c5768c0e301545, disabling compactions & flushes 2024-11-22T03:37:48,270 DEBUG [RS:0;b458937b0f5f:36003 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:37:48,270 DEBUG [RS:0;b458937b0f5f:36003 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:48,270 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:48,270 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:48,270 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. after waiting 0 ms 2024-11-22T03:37:48,270 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:48,270 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:37:48,270 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:37:48,271 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T03:37:48,271 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:37:48,271 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:37:48,271 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f7611e418df107e3e0c5768c0e301545=TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545.} 2024-11-22T03:37:48,271 DEBUG [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f7611e418df107e3e0c5768c0e301545 2024-11-22T03:37:48,271 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:37:48,271 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:37:48,271 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:37:48,271 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:37:48,271 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:37:48,276 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/default/TestLogRolling-testLogRollOnPipelineRestart/f7611e418df107e3e0c5768c0e301545/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-22T03:37:48,277 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 2024-11-22T03:37:48,277 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f7611e418df107e3e0c5768c0e301545: Waiting for close lock at 1732246668270Running coprocessor pre-close hooks at 1732246668270Disabling compacts and flushes for region at 1732246668270Disabling writes for close at 1732246668270Writing region close event to WAL at 1732246668271 (+1 ms)Running coprocessor post-close hooks at 1732246668277 (+6 ms)Closed at 1732246668277 2024-11-22T03:37:48,277 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:37:48,277 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732246642971.f7611e418df107e3e0c5768c0e301545. 
2024-11-22T03:37:48,277 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:37:48,278 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:37:48,278 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246668271Running coprocessor pre-close hooks at 1732246668271Disabling compacts and flushes for region at 1732246668271Disabling writes for close at 1732246668271Writing region close event to WAL at 1732246668273 (+2 ms)Running coprocessor post-close hooks at 1732246668277 (+4 ms)Closed at 1732246668278 (+1 ms) 2024-11-22T03:37:48,278 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:37:48,471 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,36003,1732246641529; all regions closed. 2024-11-22T03:37:48,472 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,472 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,472 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,472 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,472 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:48,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741840_1023 (size=825) 2024-11-22T03:37:48,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741840_1023 (size=825) 2024-11-22T03:37:49,017 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:37:49,017 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:37:49,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:49,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:50,018 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:37:50,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:50,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:50,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-22T03:37:51,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:51,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:51,339 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:37:52,029 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta after 4001ms 2024-11-22T03:37:52,030 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/WALs/b458937b0f5f,36003,1732246641529/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta to hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs/b458937b0f5f%2C36003%2C1732246641529.meta.1732246642784.meta 2024-11-22T03:37:52,033 DEBUG [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs 2024-11-22T03:37:52,033 INFO [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C36003%2C1732246641529.meta:.meta(num 1732246668021) 2024-11-22T03:37:52,033 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,033 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,033 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,033 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741844_1028 (size=1162) 2024-11-22T03:37:52,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741844_1028 (size=1162) 2024-11-22T03:37:52,041 DEBUG [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs 2024-11-22T03:37:52,041 INFO [RS:0;b458937b0f5f:36003 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C36003%2C1732246641529:(num 1732246668138) 2024-11-22T03:37:52,041 DEBUG 
[RS:0;b458937b0f5f:36003 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:37:52,041 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:37:52,042 INFO [RS:0;b458937b0f5f:36003 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:37:52,042 INFO [RS:0;b458937b0f5f:36003 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:37:52,042 INFO [RS:0;b458937b0f5f:36003 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:37:52,042 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:37:52,042 INFO [RS:0;b458937b0f5f:36003 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36003 2024-11-22T03:37:52,087 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,36003,1732246641529 2024-11-22T03:37:52,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:37:52,087 INFO [RS:0;b458937b0f5f:36003 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:37:52,099 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,36003,1732246641529] 2024-11-22T03:37:52,109 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,36003,1732246641529 already deleted, retry=false 2024-11-22T03:37:52,110 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,36003,1732246641529 expired; onlineServers=0 2024-11-22T03:37:52,110 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,41217,1732246641358' ***** 2024-11-22T03:37:52,110 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:37:52,110 INFO [M:0;b458937b0f5f:41217 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:37:52,110 INFO [M:0;b458937b0f5f:41217 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:37:52,110 DEBUG [M:0;b458937b0f5f:41217 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:37:52,110 DEBUG [M:0;b458937b0f5f:41217 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:37:52,110 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:37:52,110 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246641954 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246641954,5,FailOnTimeoutGroup] 2024-11-22T03:37:52,110 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246641954 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246641954,5,FailOnTimeoutGroup] 2024-11-22T03:37:52,111 INFO [M:0;b458937b0f5f:41217 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:37:52,111 INFO [M:0;b458937b0f5f:41217 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:37:52,111 DEBUG [M:0;b458937b0f5f:41217 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:37:52,111 INFO [M:0;b458937b0f5f:41217 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:37:52,111 INFO [M:0;b458937b0f5f:41217 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:37:52,112 INFO [M:0;b458937b0f5f:41217 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:37:52,112 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:37:52,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:37:52,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:52,120 DEBUG [M:0;b458937b0f5f:41217 {}] zookeeper.ZKUtil(347): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:37:52,120 WARN [M:0;b458937b0f5f:41217 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:37:52,121 INFO [M:0;b458937b0f5f:41217 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/.lastflushedseqids 2024-11-22T03:37:52,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741846_1030 (size=111) 2024-11-22T03:37:52,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741846_1030 (size=111) 2024-11-22T03:37:52,128 INFO [M:0;b458937b0f5f:41217 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:37:52,128 INFO [M:0;b458937b0f5f:41217 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:37:52,128 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:37:52,128 INFO [M:0;b458937b0f5f:41217 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:52,128 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:52,128 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:37:52,128 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:52,128 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-22T03:37:52,129 ERROR [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData-prefix:b458937b0f5f,41217,1732246641358 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:52,129 WARN [FSHLog-0-hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData-prefix:b458937b0f5f,41217,1732246641358 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:52,129 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog b458937b0f5f%2C41217%2C1732246641358:(num 1732246641671) roll requested 2024-11-22T03:37:52,129 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C41217%2C1732246641358.1732246672129 2024-11-22T03:37:52,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,134 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,135 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246672129 2024-11-22T03:37:52,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:37:52,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46577,DS-bf082d06-202c-45c4-97f0-078cf3beb7a4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:37:52,135 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 2024-11-22T03:37:52,136 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32865:32865),(127.0.0.1/127.0.0.1:33091:33091)] 2024-11-22T03:37:52,136 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 is not closed yet, will try archiving it next time 2024-11-22T03:37:52,136 WARN [IPC Server handler 4 on default port 34053 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-22T03:37:52,136 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 after 1ms 2024-11-22T03:37:52,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:52,151 DEBUG [M:0;b458937b0f5f:41217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fa4a0a416e564b7abdea7f067b2aa0ec is 82, key is hbase:meta,,1/info:regioninfo/1732246642817/Put/seqid=0 2024-11-22T03:37:52,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741848_1033 (size=5672) 2024-11-22T03:37:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741848_1033 (size=5672) 2024-11-22T03:37:52,157 INFO [M:0;b458937b0f5f:41217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fa4a0a416e564b7abdea7f067b2aa0ec 2024-11-22T03:37:52,179 DEBUG [M:0;b458937b0f5f:41217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce486039dce44849bce70b68c61617b9 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732246643354/Put/seqid=0 2024-11-22T03:37:52,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741849_1034 (size=6117) 2024-11-22T03:37:52,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741849_1034 (size=6117) 2024-11-22T03:37:52,184 INFO [M:0;b458937b0f5f:41217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce486039dce44849bce70b68c61617b9 2024-11-22T03:37:52,199 INFO [RS:0;b458937b0f5f:36003 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:37:52,199 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:52,199 INFO [RS:0;b458937b0f5f:36003 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,36003,1732246641529; zookeeper connection closed. 
2024-11-22T03:37:52,199 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36003-0x101609e16690001, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:52,199 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2398717d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2398717d 2024-11-22T03:37:52,200 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:37:52,205 DEBUG [M:0;b458937b0f5f:41217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/921f490ee7d84051a07ad212b99a2bab is 69, key is b458937b0f5f,36003,1732246641529/rs:state/1732246641995/Put/seqid=0 2024-11-22T03:37:52,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741850_1035 (size=5156) 2024-11-22T03:37:52,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741850_1035 (size=5156) 2024-11-22T03:37:52,210 INFO [M:0;b458937b0f5f:41217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/921f490ee7d84051a07ad212b99a2bab 2024-11-22T03:37:52,233 DEBUG [M:0;b458937b0f5f:41217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21a35f2e6b16405a9efb9735d6993c57 is 52, key is load_balancer_on/state:d/1732246642965/Put/seqid=0 2024-11-22T03:37:52,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741851_1036 (size=5056) 2024-11-22T03:37:52,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741851_1036 (size=5056) 2024-11-22T03:37:52,238 INFO [M:0;b458937b0f5f:41217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21a35f2e6b16405a9efb9735d6993c57 2024-11-22T03:37:52,244 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fa4a0a416e564b7abdea7f067b2aa0ec as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fa4a0a416e564b7abdea7f067b2aa0ec 2024-11-22T03:37:52,250 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fa4a0a416e564b7abdea7f067b2aa0ec, entries=8, sequenceid=56, 
filesize=5.5 K 2024-11-22T03:37:52,251 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce486039dce44849bce70b68c61617b9 as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce486039dce44849bce70b68c61617b9 2024-11-22T03:37:52,257 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce486039dce44849bce70b68c61617b9, entries=6, sequenceid=56, filesize=6.0 K 2024-11-22T03:37:52,258 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/921f490ee7d84051a07ad212b99a2bab as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/921f490ee7d84051a07ad212b99a2bab 2024-11-22T03:37:52,264 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/921f490ee7d84051a07ad212b99a2bab, entries=1, sequenceid=56, filesize=5.0 K 2024-11-22T03:37:52,265 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21a35f2e6b16405a9efb9735d6993c57 as hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21a35f2e6b16405a9efb9735d6993c57 2024-11-22T03:37:52,271 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21a35f2e6b16405a9efb9735d6993c57, entries=1, sequenceid=56, filesize=4.9 K 2024-11-22T03:37:52,272 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=56, compaction requested=false 2024-11-22T03:37:52,274 INFO [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:37:52,274 DEBUG [M:0;b458937b0f5f:41217 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246672128Disabling compacts and flushes for region at 1732246672128Disabling writes for close at 1732246672128Obtaining lock to block concurrent updates at 1732246672128Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246672128Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1732246672129 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732246672136 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246672136Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246672151 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246672151Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246672162 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246672178 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246672178Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246672190 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246672205 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246672205Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246672216 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246672232 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246672232Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@268e252: reopening flushed file at 1732246672243 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24a28078: reopening flushed file at 1732246672250 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25f57207: reopening flushed file at 1732246672257 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@244bd4c7: reopening flushed file at 1732246672264 (+7 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=56, compaction requested=false at 1732246672273 (+9 ms)Writing region close event to WAL at 1732246672274 (+1 ms)Closed at 1732246672274 2024-11-22T03:37:52,274 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,274 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,274 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,275 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:37:52,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741847_1031 (size=757) 2024-11-22T03:37:52,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43053 is added to blk_1073741847_1031 (size=757) 2024-11-22T03:37:53,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:53,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:53,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T03:37:53,815 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:37:53,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:53,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:37:54,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:54,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:55,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:55,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:56,137 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 after 4002ms 2024-11-22T03:37:56,138 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/WALs/b458937b0f5f,41217,1732246641358/b458937b0f5f%2C41217%2C1732246641358.1732246641671 to hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/oldWALs/b458937b0f5f%2C41217%2C1732246641358.1732246641671 2024-11-22T03:37:56,141 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/MasterData/oldWALs/b458937b0f5f%2C41217%2C1732246641358.1732246641671 to hdfs://localhost:34053/user/jenkins/test-data/e5058cc8-a482-7302-152b-526754f62c7f/oldWALs/b458937b0f5f%2C41217%2C1732246641358.1732246641671$masterlocalwal$ 2024-11-22T03:37:56,141 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:37:56,141 INFO [M:0;b458937b0f5f:41217 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:37:56,141 INFO [M:0;b458937b0f5f:41217 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41217 2024-11-22T03:37:56,142 INFO [M:0;b458937b0f5f:41217 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:37:56,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:56,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:37:56,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:56,303 INFO [M:0;b458937b0f5f:41217 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:37:56,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41217-0x101609e16690000, quorum=127.0.0.1:64530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:37:56,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@785df7ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:56,307 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67293df{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:56,307 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:56,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e83a469{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:56,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62874ecc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:56,309 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:56,309 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:37:56,309 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1619542008-172.17.0.3-1732246639125 (Datanode Uuid 167308fd-428f-4fb9-b897-8bc78e712518) service to localhost/127.0.0.1:34053 2024-11-22T03:37:56,309 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:56,310 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data3/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:56,310 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data4/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:56,310 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:56,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@489d8b52{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:56,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@207e03ac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:56,314 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:56,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8b56b63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:56,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22201455{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:56,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:37:56,315 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:37:56,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:37:56,315 WARN [BP-1619542008-172.17.0.3-1732246639125 heartbeating to localhost/127.0.0.1:34053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1619542008-172.17.0.3-1732246639125 (Datanode Uuid 5f35e234-81b4-4577-86f5-8bb75b7b7867) service to localhost/127.0.0.1:34053 2024-11-22T03:37:56,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data1/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:56,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/cluster_638acc76-d974-2fa8-faff-7d73b24f46aa/data/data2/current/BP-1619542008-172.17.0.3-1732246639125 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:37:56,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:37:56,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35a3721f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:37:56,323 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4946acec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:37:56,323 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:37:56,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0a81cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:37:56,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e024519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir/,STOPPED} 2024-11-22T03:37:56,329 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:37:56,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:37:56,360 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:34053 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34053 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:34053 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34053 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:34053 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=248 (was 274), ProcessCount=11 (was 11), AvailableMemoryMB=6099 (was 6323) 2024-11-22T03:37:56,368 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=248, ProcessCount=11, AvailableMemoryMB=6098 2024-11-22T03:37:56,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:37:56,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.log.dir so I do NOT create it in target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9 2024-11-22T03:37:56,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b76974f3-95f3-8849-237d-3bdb36f18bcf/hadoop.tmp.dir so I do NOT create it in target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc, deleteOnExit=true 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/test.cache.data in system properties and HBase conf 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:37:56,369 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:37:56,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:37:56,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:37:56,383 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:37:56,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:56,854 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:56,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:56,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:56,856 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:56,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:56,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37c41708{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:56,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d8b64e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:56,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@dd9ec9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/java.io.tmpdir/jetty-localhost-46491-hadoop-hdfs-3_4_1-tests_jar-_-any-1933407107069643965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:37:56,973 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fa77a2d{HTTP/1.1, (http/1.1)}{localhost:46491} 2024-11-22T03:37:56,973 INFO [Time-limited test {}] server.Server(415): Started @196390ms 2024-11-22T03:37:56,985 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:37:57,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:57,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:57,248 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:57,252 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:57,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:57,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:57,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:57,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7528c100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:57,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c8600db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:57,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26c94d53{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/java.io.tmpdir/jetty-localhost-43123-hadoop-hdfs-3_4_1-tests_jar-_-any-17035441027590982543/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:57,360 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e1f0e5b{HTTP/1.1, (http/1.1)}{localhost:43123} 2024-11-22T03:37:57,360 INFO [Time-limited test {}] server.Server(415): Started @196777ms 2024-11-22T03:37:57,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:57,386 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:37:57,390 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:37:57,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:37:57,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:37:57,390 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:37:57,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2128b0cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:37:57,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4921a1b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:37:57,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59d5bc4a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/java.io.tmpdir/jetty-localhost-44227-hadoop-hdfs-3_4_1-tests_jar-_-any-7556261206327216112/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:37:57,495 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d30f349{HTTP/1.1, (http/1.1)}{localhost:44227} 2024-11-22T03:37:57,495 INFO [Time-limited test {}] server.Server(415): Started @196912ms 2024-11-22T03:37:57,496 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:37:57,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:37:57,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:37:57,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:37:57,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:37:58,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:58,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:58,778 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data1/current/BP-513654789-172.17.0.3-1732246676394/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:58,778 WARN [Thread-1651 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data2/current/BP-513654789-172.17.0.3-1732246676394/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:58,796 WARN [Thread-1614 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:37:58,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83e1a8895d9a6734 with lease ID 0xa01e7d39d581ea4d: Processing first storage report for DS-278cd7ce-4604-43d6-8504-c0bca0cfd1f0 from datanode DatanodeRegistration(127.0.0.1:34585, datanodeUuid=32bb15c2-e35d-494b-aaf4-9678949ef5ff, infoPort=41607, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394) 2024-11-22T03:37:58,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83e1a8895d9a6734 with lease ID 0xa01e7d39d581ea4d: from storage DS-278cd7ce-4604-43d6-8504-c0bca0cfd1f0 node DatanodeRegistration(127.0.0.1:34585, datanodeUuid=32bb15c2-e35d-494b-aaf4-9678949ef5ff, infoPort=41607, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:58,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83e1a8895d9a6734 with lease ID 0xa01e7d39d581ea4d: Processing first storage report for DS-ccca1766-d608-4d78-ba32-ea4b517ea8cd from datanode DatanodeRegistration(127.0.0.1:34585, datanodeUuid=32bb15c2-e35d-494b-aaf4-9678949ef5ff, infoPort=41607, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394) 2024-11-22T03:37:58,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83e1a8895d9a6734 with lease ID 0xa01e7d39d581ea4d: from storage DS-ccca1766-d608-4d78-ba32-ea4b517ea8cd node DatanodeRegistration(127.0.0.1:34585, datanodeUuid=32bb15c2-e35d-494b-aaf4-9678949ef5ff, infoPort=41607, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 
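Note on the repeated Close-WAL-Writer-0 warnings above: RecoverLeaseFSUtils reaches DistributedFileSystem#isFileClosed through reflection, so when an earlier test has already shut down the DFSClient, the underlying IOException ("Filesystem closed") surfaces wrapped in an InvocationTargetException whose own message is null, which is exactly why the traces print "InvocationTargetException: null" before the real cause. A minimal sketch of that reflective probe, with a hypothetical helper name; this is an illustration of the pattern, not the HBase implementation:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical helper: probes DistributedFileSystem#isFileClosed reflectively,
    // the way the stack traces above show RecoverLeaseFSUtils doing it.
    final class IsFileClosedProbe {
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          // Only DistributedFileSystem exposes isFileClosed(Path), hence the reflection.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // not a DistributedFileSystem, nothing to probe
        } catch (IllegalAccessException | InvocationTargetException e) {
          // The real failure (here: IOException "Filesystem closed") is the cause of
          // the InvocationTargetException; the wrapper itself carries no message.
          return false;
        }
      }
    }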
2024-11-22T03:37:58,916 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data3/current/BP-513654789-172.17.0.3-1732246676394/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:58,916 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data4/current/BP-513654789-172.17.0.3-1732246676394/current, will proceed with Du for space computation calculation, 2024-11-22T03:37:58,936 WARN [Thread-1637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:37:58,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11c30eaea66d01d7 with lease ID 0xa01e7d39d581ea4e: Processing first storage report for DS-05060aae-a4f8-4c04-85c9-a757812b2000 from datanode DatanodeRegistration(127.0.0.1:38163, datanodeUuid=5b2713d8-9de1-43e3-a383-f1a45d366bba, infoPort=46119, infoSecurePort=0, ipcPort=34975, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394) 2024-11-22T03:37:58,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11c30eaea66d01d7 with lease ID 0xa01e7d39d581ea4e: from storage DS-05060aae-a4f8-4c04-85c9-a757812b2000 node DatanodeRegistration(127.0.0.1:38163, datanodeUuid=5b2713d8-9de1-43e3-a383-f1a45d366bba, infoPort=46119, infoSecurePort=0, ipcPort=34975, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:58,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11c30eaea66d01d7 with lease ID 0xa01e7d39d581ea4e: Processing first storage report for DS-c026057c-acac-4310-9216-5bdd53bc4b6d from datanode DatanodeRegistration(127.0.0.1:38163, datanodeUuid=5b2713d8-9de1-43e3-a383-f1a45d366bba, infoPort=46119, infoSecurePort=0, ipcPort=34975, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394) 2024-11-22T03:37:58,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11c30eaea66d01d7 with lease ID 0xa01e7d39d581ea4e: from storage DS-c026057c-acac-4310-9216-5bdd53bc4b6d node DatanodeRegistration(127.0.0.1:38163, datanodeUuid=5b2713d8-9de1-43e3-a383-f1a45d366bba, infoPort=46119, infoSecurePort=0, ipcPort=34975, storageInfo=lv=-57;cid=testClusterID;nsid=435581308;c=1732246676394), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:37:59,034 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9 2024-11-22T03:37:59,037 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/zookeeper_0, clientPort=64084, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:37:59,038 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64084 2024-11-22T03:37:59,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:37:59,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:37:59,050 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826 with version=8 2024-11-22T03:37:59,050 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:37:59,052 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:37:59,052 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:37:59,053 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40691 2024-11-22T03:37:59,055 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40691 connecting to ZooKeeper ensemble=127.0.0.1:64084 2024-11-22T03:37:59,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406910x0, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:37:59,113 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40691-0x101609ea9a70000 connected 2024-11-22T03:37:59,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:59,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:37:59,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,195 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:59,198 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826, hbase.cluster.distributed=false 2024-11-22T03:37:59,200 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:37:59,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40691 2024-11-22T03:37:59,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40691 2024-11-22T03:37:59,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40691 2024-11-22T03:37:59,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40691 2024-11-22T03:37:59,201 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40691 2024-11-22T03:37:59,221 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:37:59,221 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:37:59,222 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:32923 2024-11-22T03:37:59,224 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32923 connecting to ZooKeeper ensemble=127.0.0.1:64084 2024-11-22T03:37:59,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329230x0, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:37:59,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329230x0, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:37:59,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32923-0x101609ea9a70001 connected 2024-11-22T03:37:59,236 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:37:59,237 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:37:59,237 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
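The ZKUtil lines above ("Set watcher on znode that does not yet exist, /hbase/running" and "/hbase/master") rely on standard ZooKeeper watch semantics: an exists() call registers a watch even when the znode is absent, so the client is later delivered a NodeCreated event once the active master creates the node, as seen a few records further on. A minimal sketch using the plain ZooKeeper client rather than HBase's RecoverableZooKeeper/ZKUtil wrappers; the class name and the 30-second session timeout are illustrative assumptions, and client port 64084 is only valid for this particular run of MiniZooKeeperCluster:

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          // The first event is type=None, state=SyncConnected, matching the log.
          System.out.println("event type=" + event.getType() + " path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64084", 30000, watcher);
        connected.await();

        // exists() sets a watch even though the znode is absent; this is what
        // "Set watcher on znode that does not yet exist" means. When the active
        // master later creates /hbase/master, a NodeCreated event is delivered.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println("/hbase/master currently " + (stat == null ? "absent" : "present"));
      }
    }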
2024-11-22T03:37:59,238 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:37:59,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32923 2024-11-22T03:37:59,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32923 2024-11-22T03:37:59,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32923 2024-11-22T03:37:59,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32923 2024-11-22T03:37:59,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32923 2024-11-22T03:37:59,252 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:40691 2024-11-22T03:37:59,253 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:59,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:59,257 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:37:59,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,267 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:37:59,268 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,40691,1732246679052 from backup master directory 2024-11-22T03:37:59,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:59,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,277 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:37:59,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:37:59,277 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,282 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/hbase.id] with ID: 4d9ab365-43a4-4ebd-99d7-857d8cb3b2d6 2024-11-22T03:37:59,282 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/.tmp/hbase.id 2024-11-22T03:37:59,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:37:59,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:37:59,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/.tmp/hbase.id]:[hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/hbase.id] 2024-11-22T03:37:59,301 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:37:59,301 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:37:59,302 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
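The FSUtils sequence above (write the cluster ID under .tmp, then move it to hbase.id) is the usual write-to-temporary-then-rename pattern: an HDFS rename is atomic, so other processes either see the complete hbase.id file or no file at all, never a half-written one. A rough sketch of that pattern with the Hadoop FileSystem API, reusing the mini-cluster NameNode address hdfs://localhost:46403 and the cluster ID reported in this run; the class name and exact paths are illustrative and this is not the FSUtils code itself:

    import java.net.URI;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46403"), conf);

        Path rootDir = new Path("/user/jenkins/hbase-rootdir"); // placeholder root dir
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the ID to a temporary location first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("4d9ab365-43a4-4ebd-99d7-857d8cb3b2d6".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into place; the rename is atomic on HDFS, so readers never
        //    observe a partially written cluster ID file.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("Failed to move " + tmp + " to " + target);
        }
      }
    }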
2024-11-22T03:37:59,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:37:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:37:59,316 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:37:59,317 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:37:59,317 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:37:59,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:37:59,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:37:59,325 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store 2024-11-22T03:37:59,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:37:59,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:37:59,332 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:59,332 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:37:59,332 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:59,332 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:59,332 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:37:59,332 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:37:59,332 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
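The descriptor dump above for 'master:store' boils down to four column families: 'info' with 3 versions, ROW_INDEX_V1 block encoding, a ROWCOL Bloom filter, in-memory caching and 8 KB blocks, plus 'proc', 'rs' and 'state' with a single version, ROW Bloom filters and 64 KB blocks. A rough equivalent built with the public client API, for readability only; MasterRegion constructs this descriptor internally, and the class and method grouping here are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // proc/rs/state as dumped: 1 version, ROW bloom, 64 KB blocks, no encoding.
      static ColumnFamilyDescriptor defaultFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
      }

      public static void main(String[] args) {
        // 'info' as dumped: 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(defaultFamily("proc"))
            .setColumnFamily(defaultFamily("rs"))
            .setColumnFamily(defaultFamily("state"))
            .build();

        System.out.println(store);
      }
    }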
2024-11-22T03:37:59,332 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246679332Disabling compacts and flushes for region at 1732246679332Disabling writes for close at 1732246679332Writing region close event to WAL at 1732246679332Closed at 1732246679332 2024-11-22T03:37:59,333 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/.initializing 2024-11-22T03:37:59,334 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/WALs/b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,336 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C40691%2C1732246679052, suffix=, logDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/WALs/b458937b0f5f,40691,1732246679052, archiveDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/oldWALs, maxLogs=10 2024-11-22T03:37:59,337 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C40691%2C1732246679052.1732246679336 2024-11-22T03:37:59,341 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/WALs/b458937b0f5f,40691,1732246679052/b458937b0f5f%2C40691%2C1732246679052.1732246679336 2024-11-22T03:37:59,349 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46119:46119),(127.0.0.1/127.0.0.1:41607:41607)] 2024-11-22T03:37:59,349 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:37:59,349 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:59,350 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,350 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:37:59,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:59,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:37:59,355 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:59,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:37:59,357 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:59,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:37:59,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:37:59,359 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,360 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,360 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,362 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,362 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,362 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:37:59,364 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:37:59,366 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:37:59,366 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810659, jitterRate=0.030807361006736755}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:37:59,367 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246679350Initializing all the Stores at 1732246679350Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246679350Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246679351 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246679351Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246679351Cleaning up temporary data from old regions at 1732246679362 (+11 ms)Region opened successfully at 1732246679367 (+5 ms) 2024-11-22T03:37:59,367 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:37:59,370 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14eecbbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:37:59,371 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:37:59,371 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:37:59,371 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:37:59,371 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:37:59,372 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:37:59,372 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:37:59,372 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:37:59,374 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:37:59,375 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:37:59,382 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:37:59,383 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:37:59,384 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:37:59,393 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:37:59,394 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:37:59,395 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:37:59,403 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:37:59,405 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:37:59,414 DEBUG 
[master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:37:59,416 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:37:59,425 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:37:59,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:59,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:37:59,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,436 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,40691,1732246679052, sessionid=0x101609ea9a70000, setting cluster-up flag (Was=false) 2024-11-22T03:37:59,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,488 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:37:59,489 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:37:59,541 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:37:59,542 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,40691,1732246679052 2024-11-22T03:37:59,543 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:37:59,545 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:59,546 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:37:59,546 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:37:59,546 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,40691,1732246679052 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:37:59,548 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:37:59,549 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246709549 2024-11-22T03:37:59,549 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,550 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:59,550 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:37:59,550 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:37:59,551 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:37:59,551 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:37:59,551 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:37:59,551 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:37:59,551 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246679551,5,FailOnTimeoutGroup] 2024-11-22T03:37:59,552 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246679551,5,FailOnTimeoutGroup] 2024-11-22T03:37:59,552 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,552 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T03:37:59,552 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:37:59,552 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,552 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,552 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:37:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:37:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:37:59,571 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:37:59,571 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826 2024-11-22T03:37:59,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:37:59,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:37:59,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:37:59,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:37:59,583 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:37:59,583 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:59,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-22T03:37:59,584 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:37:59,584 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:59,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:37:59,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:37:59,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:59,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:37:59,588 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:37:59,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:37:59,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:37:59,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:37:59,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740 2024-11-22T03:37:59,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740 2024-11-22T03:37:59,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:37:59,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:37:59,591 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T03:37:59,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:37:59,594 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:37:59,595 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854931, jitterRate=0.08710113167762756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:37:59,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246679580Initializing all the Stores at 1732246679581 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246679581Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246679581Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246679581Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246679581Cleaning up temporary data from old regions at 1732246679591 (+10 ms)Region opened successfully at 1732246679595 (+4 ms) 2024-11-22T03:37:59,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:37:59,595 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:37:59,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:37:59,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:37:59,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:37:59,596 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:37:59,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246679595Disabling compacts and flushes for region at 1732246679595Disabling writes for close at 1732246679596 (+1 ms)Writing 
region close event to WAL at 1732246679596Closed at 1732246679596 2024-11-22T03:37:59,597 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:37:59,597 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:37:59,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:37:59,599 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:37:59,600 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:37:59,642 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(746): ClusterId : 4d9ab365-43a4-4ebd-99d7-857d8cb3b2d6 2024-11-22T03:37:59,642 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:37:59,646 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:37:59,646 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:37:59,657 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:37:59,657 DEBUG [RS:0;b458937b0f5f:32923 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f0af07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:37:59,668 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:32923 2024-11-22T03:37:59,669 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:37:59,669 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:37:59,669 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:37:59,669 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,40691,1732246679052 with port=32923, startcode=1732246679221 2024-11-22T03:37:59,669 DEBUG [RS:0;b458937b0f5f:32923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:37:59,671 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51071, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:37:59,672 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40691 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,32923,1732246679221 2024-11-22T03:37:59,672 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40691 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,32923,1732246679221 2024-11-22T03:37:59,673 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826 2024-11-22T03:37:59,673 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46403 2024-11-22T03:37:59,673 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:37:59,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:37:59,678 DEBUG [RS:0;b458937b0f5f:32923 {}] zookeeper.ZKUtil(111): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,32923,1732246679221 2024-11-22T03:37:59,678 WARN [RS:0;b458937b0f5f:32923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:37:59,678 INFO [RS:0;b458937b0f5f:32923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:37:59,678 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221 2024-11-22T03:37:59,678 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,32923,1732246679221] 2024-11-22T03:37:59,681 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:37:59,682 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:37:59,683 INFO [RS:0;b458937b0f5f:32923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:37:59,683 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:37:59,683 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:37:59,684 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:37:59,684 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:37:59,684 DEBUG [RS:0;b458937b0f5f:32923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:37:59,685 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T03:37:59,685 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,685 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,685 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,685 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,685 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,32923,1732246679221-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:37:59,700 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:37:59,700 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,32923,1732246679221-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,700 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,700 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.Replication(171): b458937b0f5f,32923,1732246679221 started 2024-11-22T03:37:59,716 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:37:59,716 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,32923,1732246679221, RpcServer on b458937b0f5f/172.17.0.3:32923, sessionid=0x101609ea9a70001 2024-11-22T03:37:59,716 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:37:59,716 DEBUG [RS:0;b458937b0f5f:32923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,32923,1732246679221 2024-11-22T03:37:59,717 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,32923,1732246679221' 2024-11-22T03:37:59,717 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:37:59,717 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:37:59,718 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:37:59,718 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:37:59,718 DEBUG [RS:0;b458937b0f5f:32923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,32923,1732246679221 2024-11-22T03:37:59,718 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,32923,1732246679221' 2024-11-22T03:37:59,718 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:37:59,718 DEBUG 
[RS:0;b458937b0f5f:32923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:37:59,718 DEBUG [RS:0;b458937b0f5f:32923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:37:59,718 INFO [RS:0;b458937b0f5f:32923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:37:59,718 INFO [RS:0;b458937b0f5f:32923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:37:59,750 WARN [b458937b0f5f:40691 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:37:59,821 INFO [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C32923%2C1732246679221, suffix=, logDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221, archiveDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/oldWALs, maxLogs=32 2024-11-22T03:37:59,821 INFO [RS:0;b458937b0f5f:32923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C32923%2C1732246679221.1732246679821 2024-11-22T03:37:59,828 INFO [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246679821 2024-11-22T03:37:59,830 DEBUG [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46119:46119),(127.0.0.1/127.0.0.1:41607:41607)] 2024-11-22T03:38:00,000 DEBUG [b458937b0f5f:40691 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:38:00,001 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,32923,1732246679221 2024-11-22T03:38:00,002 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,32923,1732246679221, state=OPENING 2024-11-22T03:38:00,014 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:38:00,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:00,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:00,109 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:38:00,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:00,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:00,109 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,32923,1732246679221}] 2024-11-22T03:38:00,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:00,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:00,263 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:38:00,265 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55801, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:38:00,268 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:38:00,268 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:38:00,270 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C32923%2C1732246679221.meta, suffix=.meta, logDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221, archiveDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/oldWALs, maxLogs=32 2024-11-22T03:38:00,271 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C32923%2C1732246679221.meta.1732246680270.meta 2024-11-22T03:38:00,280 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.meta.1732246680270.meta 2024-11-22T03:38:00,282 DEBUG 
[RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46119:46119),(127.0.0.1/127.0.0.1:41607:41607)] 2024-11-22T03:38:00,284 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:38:00,285 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:38:00,285 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:38:00,285 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T03:38:00,285 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:38:00,285 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:00,285 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:38:00,285 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:38:00,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:38:00,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:38:00,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:00,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:00,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:38:00,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:38:00,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:00,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:00,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:38:00,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:38:00,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:00,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:00,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:38:00,291 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:38:00,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:00,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:00,292 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:38:00,292 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740 2024-11-22T03:38:00,293 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740 2024-11-22T03:38:00,295 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:38:00,295 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:38:00,295 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
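For context on the recurring Close-WAL-Writer-0 warnings above (RecoverLeaseFSUtils failing with "Filesystem closed"): WAL lease recovery asks the NameNode to recover the old writer's lease and then polls until the file is reported closed; HBase drives that probe through reflection, which is why the failure surfaces as an InvocationTargetException wrapping the IOException. A minimal sketch of the polling pattern against the public DistributedFileSystem API follows; the class name, timeout parameter, and one-second probe interval are assumptions for illustration, not taken from this run.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /** Ask the NameNode to recover the lease on a WAL file, then poll until it is closed. */
      public static boolean recover(DistributedFileSystem dfs, Path walFile, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean closed = dfs.recoverLease(walFile);      // trigger lease recovery on the NameNode
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000);                            // probe roughly once a second
          closed = dfs.isFileClosed(walFile);            // the call that fails once the DFSClient is closed
        }
        return closed;
      }
    }

In this run each probe fails because the originating DFSClient has already been shut down, so the loop simply retries, which is why the same stack trace reappears at roughly one-second intervals.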
2024-11-22T03:38:00,297 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:38:00,297 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805945, jitterRate=0.024812400341033936}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:38:00,297 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:38:00,298 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246680285Writing region info on filesystem at 1732246680285Initializing all the Stores at 1732246680286 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246680286Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246680286Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246680286Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246680286Cleaning up temporary data from old regions at 1732246680295 (+9 ms)Running coprocessor post-open hooks at 1732246680297 (+2 ms)Region opened successfully at 1732246680298 (+1 ms) 2024-11-22T03:38:00,299 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246680262 2024-11-22T03:38:00,302 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:38:00,302 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:38:00,303 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=b458937b0f5f,32923,1732246679221 2024-11-22T03:38:00,304 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,32923,1732246679221, state=OPEN 2024-11-22T03:38:00,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:38:00,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:38:00,336 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,32923,1732246679221 2024-11-22T03:38:00,336 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:00,336 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:00,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:38:00,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,32923,1732246679221 in 227 msec 2024-11-22T03:38:00,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:38:00,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 742 msec 2024-11-22T03:38:00,344 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:38:00,344 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:38:00,345 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:38:00,345 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,32923,1732246679221, seqNum=-1] 2024-11-22T03:38:00,346 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:38:00,348 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45513, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:38:00,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 809 msec 2024-11-22T03:38:00,355 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246680355, completionTime=-1 2024-11-22T03:38:00,356 INFO 
[master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:38:00,356 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:38:00,358 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:38:00,358 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246740358 2024-11-22T03:38:00,358 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246800358 2024-11-22T03:38:00,359 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:38:00,359 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40691,1732246679052-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,359 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40691,1732246679052-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,359 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40691,1732246679052-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,359 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:40691, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,359 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,360 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,362 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.088sec 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40691,1732246679052-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:38:00,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40691,1732246679052-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:38:00,368 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:38:00,368 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:38:00,368 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40691,1732246679052-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:00,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d09da72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:38:00,442 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,40691,-1 for getting cluster id 2024-11-22T03:38:00,442 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:38:00,444 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4d9ab365-43a4-4ebd-99d7-857d8cb3b2d6' 2024-11-22T03:38:00,445 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:38:00,445 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4d9ab365-43a4-4ebd-99d7-857d8cb3b2d6" 2024-11-22T03:38:00,445 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74da7a76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:38:00,445 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,40691,-1] 2024-11-22T03:38:00,446 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:38:00,446 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:00,447 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60296, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:38:00,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d135b12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:38:00,449 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:38:00,450 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,32923,1732246679221, seqNum=-1] 2024-11-22T03:38:00,451 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:38:00,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42312, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:38:00,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b458937b0f5f,40691,1732246679052 2024-11-22T03:38:00,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:00,457 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:38:00,458 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:38:00,459 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is b458937b0f5f,40691,1732246679052 2024-11-22T03:38:00,459 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42517880 2024-11-22T03:38:00,459 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:38:00,460 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60310, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:38:00,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:38:00,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
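The two TableDescriptorChecker warnings directly above flag deliberately tiny sizing values that this log-rolling test feeds into the cluster configuration so flushes and rolls happen quickly. A hedged sketch of how such values could be set before starting a cluster; the property names and numbers come from the warnings themselves, while the wrapper class is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class TinyRegionConfSketch {
      /** Configuration with deliberately small region/flush sizes, as flagged by TableDescriptorChecker. */
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432);       // 768 KB -> MAX_FILESIZE warning
        conf.setLong("hbase.hregion.memstore.flush.size", 8192);  // 8 KB  -> MEMSTORE_FLUSHSIZE warning
        return conf;
      }
    }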
2024-11-22T03:38:00,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:38:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:00,464 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:38:00,464 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:00,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-22T03:38:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:38:00,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:38:00,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741835_1011 (size=405) 2024-11-22T03:38:00,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741835_1011 (size=405) 2024-11-22T03:38:00,480 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d91888e721fdb899e701309a492e633a, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826 2024-11-22T03:38:00,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741836_1012 (size=88) 2024-11-22T03:38:00,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34585 is added to blk_1073741836_1012 (size=88) 2024-11-22T03:38:00,491 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:00,491 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing d91888e721fdb899e701309a492e633a, disabling compactions & flushes 2024-11-22T03:38:00,491 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:00,491 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:00,491 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. after waiting 0 ms 2024-11-22T03:38:00,492 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:00,492 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:00,492 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d91888e721fdb899e701309a492e633a: Waiting for close lock at 1732246680491Disabling compacts and flushes for region at 1732246680491Disabling writes for close at 1732246680491Writing region close event to WAL at 1732246680492 (+1 ms)Closed at 1732246680492 2024-11-22T03:38:00,493 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:38:00,493 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732246680493"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246680493"}]},"ts":"1732246680493"} 2024-11-22T03:38:00,496 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
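The CreateTableProcedure above (pid=4) is the server-side half of a client createTable request for a single 'info' family with VERSIONS => '1'. A hedged client-side sketch that would produce an equivalent descriptor with the public Admin and TableDescriptorBuilder APIs; the class name and connection handling are assumptions, while the table name and family settings are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
          // One column family 'info' with a single version, matching the descriptor in the log above.
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1).build())
              .build());
        }
      }
    }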
2024-11-22T03:38:00,497 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:38:00,498 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246680497"}]},"ts":"1732246680497"} 2024-11-22T03:38:00,500 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-22T03:38:00,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=d91888e721fdb899e701309a492e633a, ASSIGN}] 2024-11-22T03:38:00,502 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=d91888e721fdb899e701309a492e633a, ASSIGN 2024-11-22T03:38:00,503 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=d91888e721fdb899e701309a492e633a, ASSIGN; state=OFFLINE, location=b458937b0f5f,32923,1732246679221; forceNewPlan=false, retain=false 2024-11-22T03:38:00,654 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d91888e721fdb899e701309a492e633a, regionState=OPENING, regionLocation=b458937b0f5f,32923,1732246679221 2024-11-22T03:38:00,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=d91888e721fdb899e701309a492e633a, ASSIGN because future has completed 2024-11-22T03:38:00,658 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d91888e721fdb899e701309a492e633a, server=b458937b0f5f,32923,1732246679221}] 2024-11-22T03:38:00,815 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 
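Once the assignment below finishes (region opened at 03:38:00,831, table marked ENABLED in hbase:meta at 03:38:00,847), a client would typically block on table availability before writing. A small hedged sketch of that wait using the public Admin API; the class name, timeout, and poll interval are assumptions.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WaitForTableSketch {
      /** Poll until the assignment and enable steps logged below have completed for the new table. */
      public static void await(Admin admin, TableName name, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(name)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException(name + " not available within " + timeoutMs + " ms");
          }
          Thread.sleep(100);
        }
      }
    }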
2024-11-22T03:38:00,815 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d91888e721fdb899e701309a492e633a, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:38:00,815 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,816 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:00,816 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,816 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,817 INFO [StoreOpener-d91888e721fdb899e701309a492e633a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,819 INFO [StoreOpener-d91888e721fdb899e701309a492e633a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d91888e721fdb899e701309a492e633a columnFamilyName info 2024-11-22T03:38:00,819 DEBUG [StoreOpener-d91888e721fdb899e701309a492e633a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:00,819 INFO [StoreOpener-d91888e721fdb899e701309a492e633a-1 {}] regionserver.HStore(327): Store=d91888e721fdb899e701309a492e633a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:38:00,820 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,821 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,821 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,822 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,822 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,824 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,826 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:38:00,826 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d91888e721fdb899e701309a492e633a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808592, jitterRate=0.02817896008491516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:38:00,827 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:00,827 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d91888e721fdb899e701309a492e633a: Running coprocessor pre-open hook at 1732246680816Writing region info on filesystem at 1732246680816Initializing all the Stores at 1732246680817 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246680817Cleaning up temporary data from old regions at 1732246680822 (+5 ms)Running coprocessor post-open hooks at 1732246680827 (+5 ms)Region opened successfully at 1732246680827 2024-11-22T03:38:00,829 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a., pid=6, masterSystemTime=1732246680810 2024-11-22T03:38:00,831 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:00,831 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:00,833 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d91888e721fdb899e701309a492e633a, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,32923,1732246679221 2024-11-22T03:38:00,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d91888e721fdb899e701309a492e633a, server=b458937b0f5f,32923,1732246679221 because future has completed 2024-11-22T03:38:00,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:38:00,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d91888e721fdb899e701309a492e633a, server=b458937b0f5f,32923,1732246679221 in 180 msec 2024-11-22T03:38:00,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:38:00,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=d91888e721fdb899e701309a492e633a, ASSIGN in 341 msec 2024-11-22T03:38:00,845 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:38:00,845 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246680845"}]},"ts":"1732246680845"} 2024-11-22T03:38:00,847 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-22T03:38:00,849 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:38:00,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 388 msec 2024-11-22T03:38:01,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:01,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:02,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:02,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:03,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:03,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:03,394 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:38:03,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:03,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:04,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:04,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:05,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:05,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:05,681 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:38:05,682 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-22T03:38:06,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:06,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:07,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:07,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:07,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:38:07,891 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:38:07,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:38:07,892 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:38:07,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:38:07,892 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T03:38:07,893 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:07,893 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:38:08,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:08,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:09,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:09,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:10,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:10,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T03:38:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-22T03:38:10,514 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T03:38:10,514 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-22T03:38:10,518 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:38:10,518 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.
2024-11-22T03:38:10,521 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a., hostname=b458937b0f5f,32923,1732246679221, seqNum=2]
2024-11-22T03:38:10,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:38:10,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:38:10,536 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T03:38:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-22T03:38:10,538 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T03:38:10,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T03:38:10,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-22T03:38:10,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.
2024-11-22T03:38:10,703 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing d91888e721fdb899e701309a492e633a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T03:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/a8e8f88f91154eaa8e8edc7d96168043 is 1080, key is row0001/info:/1732246690523/Put/seqid=0
2024-11-22T03:38:10,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741837_1013 (size=6033)
2024-11-22T03:38:10,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741837_1013 (size=6033)
2024-11-22T03:38:10,728 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/a8e8f88f91154eaa8e8edc7d96168043
2024-11-22T03:38:10,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/a8e8f88f91154eaa8e8edc7d96168043 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/a8e8f88f91154eaa8e8edc7d96168043
2024-11-22T03:38:10,742 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/a8e8f88f91154eaa8e8edc7d96168043, entries=1, sequenceid=5, filesize=5.9 K
2024-11-22T03:38:10,743 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d91888e721fdb899e701309a492e633a in 40ms, sequenceid=5, compaction requested=false
2024-11-22T03:38:10,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for d91888e721fdb899e701309a492e633a:
2024-11-22T03:38:10,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.
2024-11-22T03:38:10,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-22T03:38:10,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-22T03:38:10,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-22T03:38:10,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec
2024-11-22T03:38:10,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec
2024-11-22T03:38:11,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
11 more 2024-11-22T03:38:11,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:12,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:12,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:13,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:13,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:14,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:14,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:15,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:15,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:16,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:16,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:17,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:17,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:18,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:18,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:19,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:19,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:20,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:20,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 after 68047ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:38:20,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:20,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta after 68040ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
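
The repeated WARN entries above come from the WAL close path retrying HDFS lease recovery roughly once per second while the underlying DFSClient has already been shut down, so each isFileClosed/recoverLease call fails with "Filesystem closed" until the attempt is given up after about 68 seconds. A rough sketch of such a retry loop follows; it is loosely modelled on what RecoverLeaseFSUtils does, is not the actual HBase code, and the timeout and sleep values are placeholders:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  /** Try to recover the write lease on a WAL file, polling until it is closed or we time out. */
  public static boolean recoverWalLease(DistributedFileSystem dfs, Path wal) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 60_000L; // placeholder overall timeout
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease() asks the NameNode to reclaim the previous writer's lease;
        // isFileClosed() checks whether the file has already been closed for writing.
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // "Filesystem closed" lands here when the DFSClient was already shut down,
        // which is what the warnings above show happening on every attempt.
      }
      Thread.sleep(1_000L); // retry about once per second, matching the log cadence
    }
    return false;
  }
}
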
2024-11-22T03:38:20,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T03:38:20,595 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T03:38:20,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:20,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-22T03:38:20,603 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T03:38:20,605 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T03:38:20,605 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T03:38:20,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-22T03:38:20,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 
2024-11-22T03:38:20,760 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing d91888e721fdb899e701309a492e633a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T03:38:20,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/cf23c9b5aad54ec09efdf57190fa1112 is 1080, key is row0002/info:/1732246700596/Put/seqid=0 2024-11-22T03:38:20,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741838_1014 (size=6033) 2024-11-22T03:38:20,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741838_1014 (size=6033) 2024-11-22T03:38:20,772 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/cf23c9b5aad54ec09efdf57190fa1112 2024-11-22T03:38:20,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/cf23c9b5aad54ec09efdf57190fa1112 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cf23c9b5aad54ec09efdf57190fa1112 2024-11-22T03:38:20,788 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cf23c9b5aad54ec09efdf57190fa1112, entries=1, sequenceid=9, filesize=5.9 K 2024-11-22T03:38:20,789 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d91888e721fdb899e701309a492e633a in 29ms, sequenceid=9, compaction requested=false 2024-11-22T03:38:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for d91888e721fdb899e701309a492e633a: 2024-11-22T03:38:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 
2024-11-22T03:38:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-22T03:38:20,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-22T03:38:20,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T03:38:20,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-22T03:38:20,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-11-22T03:38:21,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:21,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:22,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:22,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:23,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:23,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:24,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:24,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:25,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:25,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:26,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:26,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:27,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:27,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:28,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:28,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:29,033 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:38:29,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:29,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:30,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:30,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T03:38:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-22T03:38:30,614 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T03:38:30,617 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C32923%2C1732246679221.1732246710617
2024-11-22T03:38:30,632 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:38:30,632 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:38:30,633 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:38:30,633 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:38:30,633 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:38:30,633 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246679821 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246710617
2024-11-22T03:38:30,634 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41607:41607),(127.0.0.1/127.0.0.1:46119:46119)]
2024-11-22T03:38:30,634 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246679821 is not closed yet, will try archiving it next time
2024-11-22T03:38:30,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741833_1009 (size=5546)
2024-11-22T03:38:30,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:38:30,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741833_1009 (size=5546)
2024-11-22T03:38:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:38:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T03:38:30,637 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T03:38:30,638 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T03:38:30,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T03:38:30,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-22T03:38:30,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.
2024-11-22T03:38:30,792 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing d91888e721fdb899e701309a492e633a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T03:38:30,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/f6bade98f4394b40ba37b43a5e412e52 is 1080, key is row0003/info:/1732246710615/Put/seqid=0
2024-11-22T03:38:30,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741840_1016 (size=6033)
2024-11-22T03:38:30,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741840_1016 (size=6033)
2024-11-22T03:38:30,805 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/f6bade98f4394b40ba37b43a5e412e52
2024-11-22T03:38:30,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/f6bade98f4394b40ba37b43a5e412e52 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f6bade98f4394b40ba37b43a5e412e52
2024-11-22T03:38:30,817 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f6bade98f4394b40ba37b43a5e412e52, entries=1, sequenceid=13, filesize=5.9 K
2024-11-22T03:38:30,819 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d91888e721fdb899e701309a492e633a in 26ms, sequenceid=13, compaction requested=true
2024-11-22T03:38:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for d91888e721fdb899e701309a492e633a:
2024-11-22T03:38:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.
2024-11-22T03:38:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-22T03:38:30,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-22T03:38:30,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-22T03:38:30,825 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec
2024-11-22T03:38:30,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec
2024-11-22T03:38:31,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-22T03:38:31,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:32,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:32,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:33,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:33,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:34,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:34,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:35,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:35,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:36,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:36,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:37,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:37,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:38,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:38,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:39,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:39,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:40,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:40,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:40,396 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-22T03:38:40,396 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
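The "Failed invocation" warnings above come from the WAL close path: RecoverLeaseFSUtils probes whether HDFS already considers the old WAL file closed by reflectively invoking DistributedFileSystem.isFileClosed, and every probe here fails because the DFSClient behind that FileSystem has already been shut down ("Filesystem closed"), so the probe is retried roughly once per second. The snippet below is a minimal, self-contained sketch of that probe-and-retry pattern, not the actual HBase implementation; the class and method names (IsFileClosedProbe, probeUntilClosed), the fixed one-second pause, and the bounded attempt count are assumptions for illustration only.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustrative sketch only: probe fs.isFileClosed(path) via reflection, the way the
 * "Failed invocation" warnings above are produced, retrying once a second until the
 * probe succeeds or the attempt budget runs out.
 */
public final class IsFileClosedProbe {

  public static boolean probeUntilClosed(FileSystem fs, Path path, int maxAttempts)
      throws InterruptedException {
    Method isFileClosed;
    try {
      // DistributedFileSystem.isFileClosed(Path) is looked up reflectively so this
      // sketch compiles against hadoop-common alone.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem has no notion of "closed" files (e.g. local fs)
    }
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, path)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // A closed DFSClient surfaces here as an InvocationTargetException caused by
        // java.io.IOException: Filesystem closed -- exactly the pattern in the log.
        System.err.println("Failed invocation for " + path + ": " + e.getCause());
      }
      Thread.sleep(1000L); // matches the roughly one-second cadence of the warnings above
    }
    return false;
  }
}
```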
2024-11-22T03:38:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-22T03:38:40,684 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T03:38:40,684 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:38:40,686 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:38:40,686 DEBUG [Time-limited test {}] regionserver.HStore(1541): d91888e721fdb899e701309a492e633a/info is initiating minor compaction (all files) 2024-11-22T03:38:40,686 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:38:40,686 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:40,686 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of d91888e721fdb899e701309a492e633a/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:40,686 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/a8e8f88f91154eaa8e8edc7d96168043, hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cf23c9b5aad54ec09efdf57190fa1112, hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f6bade98f4394b40ba37b43a5e412e52] into tmpdir=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp, totalSize=17.7 K 2024-11-22T03:38:40,687 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a8e8f88f91154eaa8e8edc7d96168043, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732246690523 2024-11-22T03:38:40,687 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cf23c9b5aad54ec09efdf57190fa1112, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732246700596 2024-11-22T03:38:40,688 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f6bade98f4394b40ba37b43a5e412e52, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732246710615 2024-11-22T03:38:40,701 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): d91888e721fdb899e701309a492e633a#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:38:40,702 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/f3219752166c4c6a86614f249d611ae8 is 1080, key is row0001/info:/1732246690523/Put/seqid=0 2024-11-22T03:38:40,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741841_1017 (size=8296) 2024-11-22T03:38:40,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741841_1017 (size=8296) 2024-11-22T03:38:40,726 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/f3219752166c4c6a86614f249d611ae8 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f3219752166c4c6a86614f249d611ae8 2024-11-22T03:38:40,734 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d91888e721fdb899e701309a492e633a/info of d91888e721fdb899e701309a492e633a into f3219752166c4c6a86614f249d611ae8(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:38:40,734 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for d91888e721fdb899e701309a492e633a: 2024-11-22T03:38:40,736 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C32923%2C1732246679221.1732246720736 2024-11-22T03:38:40,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:40,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:40,746 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:40,746 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:40,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:40,746 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246710617 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246720736 2024-11-22T03:38:40,747 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41607:41607),(127.0.0.1/127.0.0.1:46119:46119)] 2024-11-22T03:38:40,747 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246710617 is not closed yet, will try archiving it next time 2024-11-22T03:38:40,748 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246679821 to hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/oldWALs/b458937b0f5f%2C32923%2C1732246679221.1732246679821 2024-11-22T03:38:40,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741839_1015 (size=2520) 2024-11-22T03:38:40,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741839_1015 (size=2520) 2024-11-22T03:38:40,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-22T03:38:40,752 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T03:38:40,753 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T03:38:40,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T03:38:40,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-22T03:38:40,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 
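The "Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling" entry and the FlushTableProcedure (pid=13) / FlushRegionProcedure (pid=14) pair that follow are the master-side handling of an admin flush request. From the client, a flush like this one is normally issued through the HBase Admin API; the sketch below shows such a call under assumed connection settings (the ZooKeeper quorum value is a placeholder, not taken from this log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; the mini cluster in this test log runs everything locally.
    conf.set("hbase.zookeeper.quorum", "localhost");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; in the log this shows up as a
      // FlushTableProcedure that fans out one FlushRegionProcedure per region.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```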
2024-11-22T03:38:40,906 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing d91888e721fdb899e701309a492e633a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T03:38:40,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/cb33e7cffbca4137b0876454d4b719a8 is 1080, key is row0000/info:/1732246720735/Put/seqid=0 2024-11-22T03:38:40,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741843_1019 (size=6033) 2024-11-22T03:38:40,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741843_1019 (size=6033) 2024-11-22T03:38:40,919 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/cb33e7cffbca4137b0876454d4b719a8 2024-11-22T03:38:40,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/cb33e7cffbca4137b0876454d4b719a8 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cb33e7cffbca4137b0876454d4b719a8 2024-11-22T03:38:40,932 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cb33e7cffbca4137b0876454d4b719a8, entries=1, sequenceid=18, filesize=5.9 K 2024-11-22T03:38:40,933 INFO [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d91888e721fdb899e701309a492e633a in 27ms, sequenceid=18, compaction requested=false 2024-11-22T03:38:40,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for d91888e721fdb899e701309a492e633a: 2024-11-22T03:38:40,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 
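The flush above follows a write-then-commit pattern: the memstore is first written out to a file under the region's .tmp directory (cb33e7cffbca4137b0876454d4b719a8) and only afterwards moved into the info store directory, so readers never observe a half-written hfile. The following is a rough sketch of that general pattern using the plain Hadoop FileSystem API; the class name, method name, and path layout are illustrative assumptions, not HBase's internal HRegionFileSystem code.

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch of the write-to-.tmp-then-rename commit step visible in the flush log above. */
public final class TmpCommitSketch {

  public static Path writeAndCommit(FileSystem fs, Path storeDir, String fileName,
      byte[] payload) throws IOException {
    Path tmpFile = new Path(new Path(storeDir.getParent(), ".tmp"), fileName);
    Path finalFile = new Path(storeDir, fileName);

    // 1. Write the complete file under .tmp first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }

    // 2. Commit by renaming into the store directory; a rename is a single
    //    namenode operation on HDFS, so the store sees the whole file or none of it.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }
}
```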
2024-11-22T03:38:40,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-22T03:38:40,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-22T03:38:40,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-22T03:38:40,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec
2024-11-22T03:38:40,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec
2024-11-22T03:38:41,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:41,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
2024-11-22T03:38:42,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
2024-11-22T03:38:42,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
11 more 2024-11-22T03:38:43,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:43,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:44,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:44,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:45,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:45,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:45,816 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d91888e721fdb899e701309a492e633a, had cached 0 bytes from a total of 14329 2024-11-22T03:38:46,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:46,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:47,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:47,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:48,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:48,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:49,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:49,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:50,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:50,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40691 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-22T03:38:50,794 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T03:38:50,798 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C32923%2C1732246679221.1732246730798 2024-11-22T03:38:50,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:50,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:50,806 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:50,806 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:50,806 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:50,806 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246720736 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246730798 2024-11-22T03:38:50,807 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46119:46119),(127.0.0.1/127.0.0.1:41607:41607)] 2024-11-22T03:38:50,807 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246720736 is not closed yet, will try archiving it next time 2024-11-22T03:38:50,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:38:50,807 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/WALs/b458937b0f5f,32923,1732246679221/b458937b0f5f%2C32923%2C1732246679221.1732246710617 to hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/oldWALs/b458937b0f5f%2C32923%2C1732246679221.1732246710617 2024-11-22T03:38:50,807 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:38:50,807 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:38:50,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:50,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:50,807 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T03:38:50,807 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:38:50,808 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1916862285, stopped=false 2024-11-22T03:38:50,808 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,40691,1732246679052 2024-11-22T03:38:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741842_1018 (size=2026) 2024-11-22T03:38:50,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741842_1018 (size=2026) 2024-11-22T03:38:50,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:38:50,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:38:50,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:50,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:50,853 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:38:50,853 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:38:50,854 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:38:50,854 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:50,854 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:38:50,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:38:50,854 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,32923,1732246679221' ***** 2024-11-22T03:38:50,854 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:38:50,854 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:38:50,855 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(3091): Received CLOSE for d91888e721fdb899e701309a492e633a 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,32923,1732246679221 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:32923. 2024-11-22T03:38:50,855 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d91888e721fdb899e701309a492e633a, disabling compactions & flushes 2024-11-22T03:38:50,855 DEBUG [RS:0;b458937b0f5f:32923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:38:50,855 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:50,855 DEBUG [RS:0;b458937b0f5f:32923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:50,855 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:38:50,855 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. after waiting 0 ms 2024-11-22T03:38:50,855 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:38:50,856 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:50,856 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:38:50,856 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:38:50,856 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d91888e721fdb899e701309a492e633a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T03:38:50,856 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:38:50,856 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, d91888e721fdb899e701309a492e633a=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.} 2024-11-22T03:38:50,856 DEBUG [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d91888e721fdb899e701309a492e633a 2024-11-22T03:38:50,856 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:38:50,857 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:38:50,857 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:38:50,857 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:38:50,857 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:38:50,857 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-22T03:38:50,861 DEBUG 
[RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/ae926b43972249cba186addeba485c61 is 1080, key is row0001/info:/1732246730796/Put/seqid=0 2024-11-22T03:38:50,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741845_1021 (size=6033) 2024-11-22T03:38:50,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741845_1021 (size=6033) 2024-11-22T03:38:50,868 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/ae926b43972249cba186addeba485c61 2024-11-22T03:38:50,875 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/.tmp/info/ae926b43972249cba186addeba485c61 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/ae926b43972249cba186addeba485c61 2024-11-22T03:38:50,877 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/info/e07695589eee4a668cc7927a32c84192 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a./info:regioninfo/1732246680832/Put/seqid=0 2024-11-22T03:38:50,882 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/ae926b43972249cba186addeba485c61, entries=1, sequenceid=22, filesize=5.9 K 2024-11-22T03:38:50,883 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d91888e721fdb899e701309a492e633a in 27ms, sequenceid=22, compaction requested=true 2024-11-22T03:38:50,883 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/a8e8f88f91154eaa8e8edc7d96168043, 
hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cf23c9b5aad54ec09efdf57190fa1112, hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f6bade98f4394b40ba37b43a5e412e52] to archive 2024-11-22T03:38:50,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741846_1022 (size=7308) 2024-11-22T03:38:50,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741846_1022 (size=7308) 2024-11-22T03:38:50,884 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:38:50,885 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/info/e07695589eee4a668cc7927a32c84192 2024-11-22T03:38:50,886 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/a8e8f88f91154eaa8e8edc7d96168043 to hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/a8e8f88f91154eaa8e8edc7d96168043 2024-11-22T03:38:50,887 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cf23c9b5aad54ec09efdf57190fa1112 to hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/cf23c9b5aad54ec09efdf57190fa1112 2024-11-22T03:38:50,889 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f6bade98f4394b40ba37b43a5e412e52 to hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/info/f6bade98f4394b40ba37b43a5e412e52 2024-11-22T03:38:50,889 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b458937b0f5f:40691 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T03:38:50,890 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a8e8f88f91154eaa8e8edc7d96168043=6033, cf23c9b5aad54ec09efdf57190fa1112=6033, f6bade98f4394b40ba37b43a5e412e52=6033] 2024-11-22T03:38:50,904 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/d91888e721fdb899e701309a492e633a/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-22T03:38:50,904 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 2024-11-22T03:38:50,904 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d91888e721fdb899e701309a492e633a: Waiting for close lock at 1732246730855Running coprocessor pre-close hooks at 1732246730855Disabling compacts and flushes for region at 1732246730855Disabling writes for close at 1732246730855Obtaining lock to block concurrent updates at 1732246730856 (+1 ms)Preparing flush snapshotting stores in d91888e721fdb899e701309a492e633a at 1732246730856Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732246730856Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. at 1732246730857 (+1 ms)Flushing d91888e721fdb899e701309a492e633a/info: creating writer at 1732246730857Flushing d91888e721fdb899e701309a492e633a/info: appending metadata at 1732246730860 (+3 ms)Flushing d91888e721fdb899e701309a492e633a/info: closing flushed file at 1732246730860Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58566d58: reopening flushed file at 1732246730874 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d91888e721fdb899e701309a492e633a in 27ms, sequenceid=22, compaction requested=true at 1732246730883 (+9 ms)Writing region close event to WAL at 1732246730890 (+7 ms)Running coprocessor post-close hooks at 1732246730904 (+14 ms)Closed at 1732246730904 2024-11-22T03:38:50,905 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732246680461.d91888e721fdb899e701309a492e633a. 
2024-11-22T03:38:50,907 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/ns/5ffa4323e7ec494abe3a199039086185 is 43, key is default/ns:d/1732246680349/Put/seqid=0 2024-11-22T03:38:50,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741847_1023 (size=5153) 2024-11-22T03:38:50,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741847_1023 (size=5153) 2024-11-22T03:38:50,913 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/ns/5ffa4323e7ec494abe3a199039086185 2024-11-22T03:38:50,933 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/table/344c63629a7546de84020193ccd7ec9f is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732246680845/Put/seqid=0 2024-11-22T03:38:50,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741848_1024 (size=5508) 2024-11-22T03:38:50,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741848_1024 (size=5508) 2024-11-22T03:38:50,938 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/table/344c63629a7546de84020193ccd7ec9f 2024-11-22T03:38:50,944 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/info/e07695589eee4a668cc7927a32c84192 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/info/e07695589eee4a668cc7927a32c84192 2024-11-22T03:38:50,950 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/info/e07695589eee4a668cc7927a32c84192, entries=10, sequenceid=11, filesize=7.1 K 2024-11-22T03:38:50,951 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/ns/5ffa4323e7ec494abe3a199039086185 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/ns/5ffa4323e7ec494abe3a199039086185 2024-11-22T03:38:50,957 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/ns/5ffa4323e7ec494abe3a199039086185, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:38:50,958 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/.tmp/table/344c63629a7546de84020193ccd7ec9f as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/table/344c63629a7546de84020193ccd7ec9f 2024-11-22T03:38:50,963 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/table/344c63629a7546de84020193ccd7ec9f, entries=2, sequenceid=11, filesize=5.4 K 2024-11-22T03:38:50,964 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false 2024-11-22T03:38:50,969 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:38:50,970 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:38:50,970 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:38:50,970 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246730856Running coprocessor pre-close hooks at 1732246730856Disabling compacts and flushes for region at 1732246730856Disabling writes for close at 1732246730857 (+1 ms)Obtaining lock to block concurrent updates at 1732246730857Preparing flush snapshotting stores in 1588230740 at 1732246730857Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732246730858 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732246730858Flushing 1588230740/info: creating writer at 1732246730858Flushing 1588230740/info: appending metadata at 1732246730877 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732246730877Flushing 1588230740/ns: creating writer at 1732246730890 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732246730907 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732246730907Flushing 1588230740/table: creating writer at 1732246730919 (+12 ms)Flushing 1588230740/table: appending metadata at 1732246730933 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732246730933Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c142503: reopening flushed file at 1732246730943 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@228144db: reopening flushed file at 1732246730950 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@770fea0d: reopening flushed file at 1732246730957 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false at 1732246730964 (+7 ms)Writing region close event to WAL at 1732246730966 (+2 ms)Running coprocessor post-close hooks at 1732246730970 (+4 ms)Closed at 1732246730970 2024-11-22T03:38:50,970 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:38:51,056 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,32923,1732246679221; all regions closed. 2024-11-22T03:38:51,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,057 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,057 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,057 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,057 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741834_1010 (size=3306) 2024-11-22T03:38:51,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741834_1010 (size=3306) 2024-11-22T03:38:51,062 DEBUG [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/oldWALs 2024-11-22T03:38:51,062 INFO [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C32923%2C1732246679221.meta:.meta(num 1732246680270) 2024-11-22T03:38:51,062 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,062 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,062 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,063 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,063 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741844_1020 (size=1252) 2024-11-22T03:38:51,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741844_1020 (size=1252) 2024-11-22T03:38:51,067 DEBUG [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/oldWALs 2024-11-22T03:38:51,067 INFO [RS:0;b458937b0f5f:32923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C32923%2C1732246679221:(num 1732246730798) 2024-11-22T03:38:51,067 DEBUG [RS:0;b458937b0f5f:32923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:51,067 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:38:51,067 INFO [RS:0;b458937b0f5f:32923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:38:51,067 INFO [RS:0;b458937b0f5f:32923 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:38:51,068 INFO [RS:0;b458937b0f5f:32923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:38:51,068 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:38:51,068 INFO [RS:0;b458937b0f5f:32923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:32923 2024-11-22T03:38:51,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,32923,1732246679221 2024-11-22T03:38:51,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:38:51,074 INFO [RS:0;b458937b0f5f:32923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:38:51,084 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,32923,1732246679221] 2024-11-22T03:38:51,095 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,32923,1732246679221 already deleted, retry=false 2024-11-22T03:38:51,095 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,32923,1732246679221 expired; onlineServers=0 2024-11-22T03:38:51,095 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,40691,1732246679052' ***** 2024-11-22T03:38:51,095 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:38:51,095 INFO [M:0;b458937b0f5f:40691 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:38:51,095 INFO [M:0;b458937b0f5f:40691 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:38:51,095 DEBUG [M:0;b458937b0f5f:40691 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:38:51,095 DEBUG [M:0;b458937b0f5f:40691 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:38:51,095 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:38:51,095 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246679551 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246679551,5,FailOnTimeoutGroup] 2024-11-22T03:38:51,095 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246679551 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246679551,5,FailOnTimeoutGroup] 2024-11-22T03:38:51,096 INFO [M:0;b458937b0f5f:40691 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:38:51,096 INFO [M:0;b458937b0f5f:40691 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:38:51,096 DEBUG [M:0;b458937b0f5f:40691 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:38:51,096 INFO [M:0;b458937b0f5f:40691 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:38:51,096 INFO [M:0;b458937b0f5f:40691 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:38:51,096 INFO [M:0;b458937b0f5f:40691 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:38:51,096 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:38:51,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:38:51,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:51,105 DEBUG [M:0;b458937b0f5f:40691 {}] zookeeper.ZKUtil(347): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:38:51,105 WARN [M:0;b458937b0f5f:40691 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:38:51,106 INFO [M:0;b458937b0f5f:40691 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/.lastflushedseqids 2024-11-22T03:38:51,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741849_1025 (size=130) 2024-11-22T03:38:51,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741849_1025 (size=130) 2024-11-22T03:38:51,113 INFO [M:0;b458937b0f5f:40691 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:38:51,113 INFO [M:0;b458937b0f5f:40691 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:38:51,113 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:38:51,113 INFO [M:0;b458937b0f5f:40691 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:51,113 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:51,114 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:38:51,114 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:51,114 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-22T03:38:51,131 DEBUG [M:0;b458937b0f5f:40691 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3ae0dc1f730143cca0f434a5ac8510b2 is 82, key is hbase:meta,,1/info:regioninfo/1732246680303/Put/seqid=0 2024-11-22T03:38:51,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741850_1026 (size=5672) 2024-11-22T03:38:51,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741850_1026 (size=5672) 2024-11-22T03:38:51,136 INFO [M:0;b458937b0f5f:40691 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3ae0dc1f730143cca0f434a5ac8510b2 2024-11-22T03:38:51,156 DEBUG [M:0;b458937b0f5f:40691 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1e7efe10f48420ca35fb5e0cd14a692 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732246680850/Put/seqid=0 2024-11-22T03:38:51,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741851_1027 (size=7818) 2024-11-22T03:38:51,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741851_1027 (size=7818) 2024-11-22T03:38:51,162 INFO [M:0;b458937b0f5f:40691 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1e7efe10f48420ca35fb5e0cd14a692 2024-11-22T03:38:51,167 INFO [M:0;b458937b0f5f:40691 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f1e7efe10f48420ca35fb5e0cd14a692 2024-11-22T03:38:51,184 DEBUG [M:0;b458937b0f5f:40691 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/da2a2f993f32409e807d7dc734fc8219 is 69, key is b458937b0f5f,32923,1732246679221/rs:state/1732246679672/Put/seqid=0 
2024-11-22T03:38:51,184 INFO [RS:0;b458937b0f5f:32923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:38:51,185 INFO [RS:0;b458937b0f5f:32923 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,32923,1732246679221; zookeeper connection closed. 2024-11-22T03:38:51,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:38:51,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32923-0x101609ea9a70001, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:38:51,185 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1e9e25cf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1e9e25cf 2024-11-22T03:38:51,186 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:38:51,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741852_1028 (size=5156) 2024-11-22T03:38:51,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741852_1028 (size=5156) 2024-11-22T03:38:51,189 INFO [M:0;b458937b0f5f:40691 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/da2a2f993f32409e807d7dc734fc8219 2024-11-22T03:38:51,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:51,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:51,208 DEBUG [M:0;b458937b0f5f:40691 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e59b967fe5be4c7d91ae5a0d5ea062ed is 52, key is load_balancer_on/state:d/1732246680456/Put/seqid=0 2024-11-22T03:38:51,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741853_1029 (size=5056) 2024-11-22T03:38:51,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741853_1029 (size=5056) 2024-11-22T03:38:51,213 INFO [M:0;b458937b0f5f:40691 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e59b967fe5be4c7d91ae5a0d5ea062ed 2024-11-22T03:38:51,219 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3ae0dc1f730143cca0f434a5ac8510b2 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3ae0dc1f730143cca0f434a5ac8510b2 2024-11-22T03:38:51,224 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3ae0dc1f730143cca0f434a5ac8510b2, entries=8, sequenceid=121, filesize=5.5 K 2024-11-22T03:38:51,225 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1e7efe10f48420ca35fb5e0cd14a692 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f1e7efe10f48420ca35fb5e0cd14a692 2024-11-22T03:38:51,230 INFO [M:0;b458937b0f5f:40691 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f1e7efe10f48420ca35fb5e0cd14a692 2024-11-22T03:38:51,231 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f1e7efe10f48420ca35fb5e0cd14a692, entries=14, sequenceid=121, filesize=7.6 K 2024-11-22T03:38:51,232 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/da2a2f993f32409e807d7dc734fc8219 as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/da2a2f993f32409e807d7dc734fc8219 2024-11-22T03:38:51,238 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/da2a2f993f32409e807d7dc734fc8219, entries=1, sequenceid=121, filesize=5.0 K 2024-11-22T03:38:51,239 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e59b967fe5be4c7d91ae5a0d5ea062ed as hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e59b967fe5be4c7d91ae5a0d5ea062ed 2024-11-22T03:38:51,244 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46403/user/jenkins/test-data/465b4a95-b577-aa5a-9a5a-c5ae58ed2826/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e59b967fe5be4c7d91ae5a0d5ea062ed, entries=1, sequenceid=121, filesize=4.9 K 2024-11-22T03:38:51,245 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=121, compaction requested=false 2024-11-22T03:38:51,250 INFO [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:51,250 DEBUG [M:0;b458937b0f5f:40691 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246731113Disabling compacts and flushes for region at 1732246731113Disabling writes for close at 1732246731114 (+1 ms)Obtaining lock to block concurrent updates at 1732246731114Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246731114Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1732246731114Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732246731115 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246731115Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246731131 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246731131Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246731141 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246731155 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246731155Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246731167 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246731183 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246731183Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246731194 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246731207 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246731207Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41497f2c: reopening flushed file at 1732246731218 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34441660: reopening flushed file at 1732246731224 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67d47022: reopening flushed file at 1732246731231 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fb84161: reopening flushed file at 1732246731238 (+7 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=121, compaction requested=false at 1732246731245 (+7 ms)Writing region close event to WAL at 1732246731250 (+5 ms)Closed at 1732246731250 2024-11-22T03:38:51,251 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,251 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,251 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,251 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,251 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:38:51,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34585 is added to blk_1073741830_1006 (size=52987) 2024-11-22T03:38:51,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741830_1006 (size=52987) 2024-11-22T03:38:51,254 INFO [M:0;b458937b0f5f:40691 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:38:51,254 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:38:51,254 INFO [M:0;b458937b0f5f:40691 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40691 2024-11-22T03:38:51,254 INFO [M:0;b458937b0f5f:40691 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:38:51,363 INFO [M:0;b458937b0f5f:40691 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:38:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:38:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40691-0x101609ea9a70000, quorum=127.0.0.1:64084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:38:51,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59d5bc4a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:38:51,367 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d30f349{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:38:51,367 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:38:51,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4921a1b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:38:51,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2128b0cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir/,STOPPED} 2024-11-22T03:38:51,369 WARN [BP-513654789-172.17.0.3-1732246676394 heartbeating to localhost/127.0.0.1:46403 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:38:51,369 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:38:51,369 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:38:51,369 WARN [BP-513654789-172.17.0.3-1732246676394 heartbeating to localhost/127.0.0.1:46403 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-513654789-172.17.0.3-1732246676394 (Datanode Uuid 5b2713d8-9de1-43e3-a383-f1a45d366bba) service to localhost/127.0.0.1:46403 2024-11-22T03:38:51,370 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data3/current/BP-513654789-172.17.0.3-1732246676394 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:38:51,370 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data4/current/BP-513654789-172.17.0.3-1732246676394 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:38:51,370 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:38:51,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26c94d53{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:38:51,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e1f0e5b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:38:51,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:38:51,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c8600db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:38:51,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7528c100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir/,STOPPED} 2024-11-22T03:38:51,375 WARN [BP-513654789-172.17.0.3-1732246676394 heartbeating to localhost/127.0.0.1:46403 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:38:51,375 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:38:51,375 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:38:51,375 WARN [BP-513654789-172.17.0.3-1732246676394 heartbeating to localhost/127.0.0.1:46403 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-513654789-172.17.0.3-1732246676394 (Datanode Uuid 32bb15c2-e35d-494b-aaf4-9678949ef5ff) service to localhost/127.0.0.1:46403 2024-11-22T03:38:51,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data1/current/BP-513654789-172.17.0.3-1732246676394 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:38:51,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/cluster_49f704a3-79b4-e8f2-009f-8c87de040efc/data/data2/current/BP-513654789-172.17.0.3-1732246676394 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:38:51,377 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:38:51,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@dd9ec9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:38:51,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fa77a2d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:38:51,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:38:51,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d8b64e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:38:51,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37c41708{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir/,STOPPED} 2024-11-22T03:38:51,389 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:38:51,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:38:51,418 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46403 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:46403 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/b458937b0f5f:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:46403 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:46403 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:46403 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=183 (was 248), ProcessCount=11 (was 11), AvailableMemoryMB=5957 (was 6098) 2024-11-22T03:38:51,428 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=183, ProcessCount=11, AvailableMemoryMB=5957 2024-11-22T03:38:51,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:38:51,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.log.dir so I do NOT create it in target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329 2024-11-22T03:38:51,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d8d0f83a-8998-aab2-ebcf-a5a917325bc9/hadoop.tmp.dir so I do NOT create it in target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329 2024-11-22T03:38:51,428 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a, deleteOnExit=true 2024-11-22T03:38:51,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/test.cache.data in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:38:51,429 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:38:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:38:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:38:51,443 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:38:51,687 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:38:51,791 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:38:51,795 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:38:51,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:38:51,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:38:51,796 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:38:51,797 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:38:51,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74b22f54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:38:51,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c149881{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:38:51,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@af49f38{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/java.io.tmpdir/jetty-localhost-33257-hadoop-hdfs-3_4_1-tests_jar-_-any-464034413565541164/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:38:51,906 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@240b8237{HTTP/1.1, (http/1.1)}{localhost:33257} 2024-11-22T03:38:51,906 INFO [Time-limited test {}] server.Server(415): Started @251324ms 2024-11-22T03:38:51,918 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:38:52,175 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:38:52,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:38:52,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:38:52,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:38:52,179 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:38:52,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58b83575{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:38:52,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b569bd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:38:52,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:52,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:52,283 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e22ffa9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/java.io.tmpdir/jetty-localhost-45577-hadoop-hdfs-3_4_1-tests_jar-_-any-3073127159854878918/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:38:52,283 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b5eb244{HTTP/1.1, (http/1.1)}{localhost:45577} 2024-11-22T03:38:52,283 INFO [Time-limited test {}] server.Server(415): Started @251701ms 2024-11-22T03:38:52,284 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:38:52,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:38:52,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:38:52,316 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:38:52,316 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:38:52,317 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:38:52,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13fc1909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:38:52,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@661bea50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:38:52,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ed7c159{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/java.io.tmpdir/jetty-localhost-41407-hadoop-hdfs-3_4_1-tests_jar-_-any-15298816976653527441/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:38:52,432 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b675707{HTTP/1.1, (http/1.1)}{localhost:41407} 2024-11-22T03:38:52,432 INFO [Time-limited test {}] server.Server(415): Started @251850ms 2024-11-22T03:38:52,433 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:38:53,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:53,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:53,503 WARN [Thread-1968 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data1/current/BP-575432354-172.17.0.3-1732246731445/current, will proceed with Du for space computation calculation, 2024-11-22T03:38:53,503 WARN [Thread-1969 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data2/current/BP-575432354-172.17.0.3-1732246731445/current, will proceed with Du for space computation calculation, 2024-11-22T03:38:53,524 WARN [Thread-1932 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:38:53,526 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6224cb27152dd7b5 with lease ID 0x469a93f3bde13190: Processing first storage report for DS-16e92c8a-b757-4383-b59c-eef19a53d858 from datanode DatanodeRegistration(127.0.0.1:45927, datanodeUuid=cb1230d8-75e7-4a7b-88f4-58674992cc6c, infoPort=42925, infoSecurePort=0, ipcPort=34571, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445) 2024-11-22T03:38:53,526 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6224cb27152dd7b5 with lease ID 0x469a93f3bde13190: from storage DS-16e92c8a-b757-4383-b59c-eef19a53d858 node DatanodeRegistration(127.0.0.1:45927, datanodeUuid=cb1230d8-75e7-4a7b-88f4-58674992cc6c, infoPort=42925, infoSecurePort=0, ipcPort=34571, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:38:53,526 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6224cb27152dd7b5 with lease ID 0x469a93f3bde13190: Processing first storage report for DS-98e0f751-fa99-4587-967d-a57b9ba32412 from datanode DatanodeRegistration(127.0.0.1:45927, datanodeUuid=cb1230d8-75e7-4a7b-88f4-58674992cc6c, infoPort=42925, infoSecurePort=0, ipcPort=34571, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445) 2024-11-22T03:38:53,526 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6224cb27152dd7b5 with lease ID 0x469a93f3bde13190: from storage DS-98e0f751-fa99-4587-967d-a57b9ba32412 node DatanodeRegistration(127.0.0.1:45927, datanodeUuid=cb1230d8-75e7-4a7b-88f4-58674992cc6c, infoPort=42925, infoSecurePort=0, ipcPort=34571, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:38:53,643 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data3/current/BP-575432354-172.17.0.3-1732246731445/current, will proceed with Du for space computation calculation, 2024-11-22T03:38:53,643 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data4/current/BP-575432354-172.17.0.3-1732246731445/current, will proceed with Du for space computation calculation, 2024-11-22T03:38:53,666 WARN [Thread-1955 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:38:53,668 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6556c5110ed2e9ea with lease ID 0x469a93f3bde13191: Processing first storage report for DS-2dbdaf5e-6a0d-4b45-b46b-88794516f5a0 from datanode DatanodeRegistration(127.0.0.1:33755, datanodeUuid=34b1f1ad-42a1-494f-b4e2-d9c5d87684e7, infoPort=45483, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445) 2024-11-22T03:38:53,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6556c5110ed2e9ea with lease ID 0x469a93f3bde13191: from storage DS-2dbdaf5e-6a0d-4b45-b46b-88794516f5a0 node DatanodeRegistration(127.0.0.1:33755, datanodeUuid=34b1f1ad-42a1-494f-b4e2-d9c5d87684e7, infoPort=45483, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:38:53,668 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6556c5110ed2e9ea with lease ID 0x469a93f3bde13191: Processing first storage report for DS-7763d40b-0a90-4128-a02f-c9c204f75e5d from datanode DatanodeRegistration(127.0.0.1:33755, datanodeUuid=34b1f1ad-42a1-494f-b4e2-d9c5d87684e7, infoPort=45483, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445) 2024-11-22T03:38:53,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6556c5110ed2e9ea with lease ID 0x469a93f3bde13191: from storage DS-7763d40b-0a90-4128-a02f-c9c204f75e5d node DatanodeRegistration(127.0.0.1:33755, datanodeUuid=34b1f1ad-42a1-494f-b4e2-d9c5d87684e7, infoPort=45483, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=1450689022;c=1732246731445), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:38:53,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329 2024-11-22T03:38:53,773 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/zookeeper_0, clientPort=62408, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:38:53,774 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62408 2024-11-22T03:38:53,775 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:53,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:38:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:38:53,788 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf with version=8 2024-11-22T03:38:53,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:38:53,790 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:38:53,791 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:38:53,792 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38087 2024-11-22T03:38:53,794 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38087 connecting to ZooKeeper ensemble=127.0.0.1:62408 2024-11-22T03:38:53,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:380870x0, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-22T03:38:53,852 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38087-0x101609f7f770000 connected 2024-11-22T03:38:53,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:53,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:53,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:38:53,941 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf, hbase.cluster.distributed=false 2024-11-22T03:38:53,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:38:53,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38087 2024-11-22T03:38:53,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38087 2024-11-22T03:38:53,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38087 2024-11-22T03:38:53,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38087 2024-11-22T03:38:53,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38087 2024-11-22T03:38:53,960 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:38:53,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:38:53,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:38:53,960 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:38:53,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:38:53,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:38:53,960 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:38:53,960 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:38:53,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45889 2024-11-22T03:38:53,963 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45889 connecting to ZooKeeper ensemble=127.0.0.1:62408 2024-11-22T03:38:53,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:53,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:53,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458890x0, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:38:53,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:458890x0, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:38:53,979 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45889-0x101609f7f770001 connected 2024-11-22T03:38:53,980 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:38:53,981 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:38:53,982 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:38:53,983 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:38:53,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45889 2024-11-22T03:38:53,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45889 2024-11-22T03:38:53,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45889 2024-11-22T03:38:53,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45889 2024-11-22T03:38:53,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45889 2024-11-22T03:38:54,000 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:38087 2024-11-22T03:38:54,001 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-22T03:38:54,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:38:54,011 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:38:54,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,021 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:38:54,022 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,38087,1732246733790 from backup master directory 2024-11-22T03:38:54,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:38:54,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:38:54,031 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
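The entries above show the master and region server registering ZooKeeper watchers against the ensemble at 127.0.0.1:62408, including ZKUtil's "Set watcher on znode that does not yet exist" calls for /hbase/master and /hbase/running. A minimal sketch of that watch-before-create pattern using the plain ZooKeeper client follows; the connect string and session timeout are taken from the log, while the class name and sleep are illustrative only.

import org.apache.zookeeper.ZooKeeper;

public class WatchMasterZNode {
  public static void main(String[] args) throws Exception {
    // Ensemble address and 40000 ms session timeout as reported above;
    // adjust both if you run this against a different mini-cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62408", 40000,
        event -> System.out.println("ZK event: " + event));
    // exists(path, true) registers a watch even when the znode is absent,
    // which is the "Set watcher on znode that does not yet exist" behavior
    // the log records for /hbase/master and /hbase/running.
    if (zk.exists("/hbase/master", true) == null) {
      System.out.println("/hbase/master not present yet; waiting for NodeCreated");
    }
    Thread.sleep(10_000); // keep the session alive long enough to observe events
    zk.close();
  }
}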
2024-11-22T03:38:54,031 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,036 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/hbase.id] with ID: da241822-92ac-43c5-9e09-6e3f11d09da4 2024-11-22T03:38:54,036 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/.tmp/hbase.id 2024-11-22T03:38:54,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:38:54,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:38:54,042 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/.tmp/hbase.id]:[hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/hbase.id] 2024-11-22T03:38:54,053 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:54,053 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:38:54,055 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
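FSUtils above writes the cluster ID to a temporary file under .tmp and then moves it to hbase.id. A hedged sketch of that write-then-rename publish pattern with the Hadoop FileSystem API; the NameNode address comes from the log, but the paths and the file content written here are placeholders for illustration.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenMove {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41491"), conf);
    Path tmp = new Path("/user/jenkins/demo/.tmp/cluster.id");   // placeholder path
    Path target = new Path("/user/jenkins/demo/cluster.id");     // placeholder path
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("da241822-92ac-43c5-9e09-6e3f11d09da4\n".getBytes(StandardCharsets.UTF_8));
    }
    // Publish by rename so readers never observe a half-written file,
    // mirroring the temporary-then-move steps in the log above.
    if (!fs.rename(tmp, target)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
    }
    System.out.println("cluster id file at " + target);
  }
}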
2024-11-22T03:38:54,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:38:54,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:38:54,070 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:38:54,070 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:38:54,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:38:54,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:38:54,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:38:54,078 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store 2024-11-22T03:38:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:38:54,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:38:54,085 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:54,085 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:38:54,086 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:54,086 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:54,086 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:38:54,086 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:38:54,086 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
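The master:store descriptor printed above lists four column families (info, proc, rs, state) with attributes such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. Below is a rough sketch of how a user table with comparable family settings can be described through the public client API; the table and family names are placeholders, and this is not how the internal master:store region itself is created.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // "demo" is a placeholder table; the attribute values mirror the
    // 'info' family shown in the descriptor logged above.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        .build();
    System.out.println(td);
  }
}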
2024-11-22T03:38:54,086 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246734085Disabling compacts and flushes for region at 1732246734085Disabling writes for close at 1732246734086 (+1 ms)Writing region close event to WAL at 1732246734086Closed at 1732246734086 2024-11-22T03:38:54,087 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/.initializing 2024-11-22T03:38:54,087 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/WALs/b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,090 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C38087%2C1732246733790, suffix=, logDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/WALs/b458937b0f5f,38087,1732246733790, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/oldWALs, maxLogs=10 2024-11-22T03:38:54,091 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C38087%2C1732246733790.1732246734091 2024-11-22T03:38:54,096 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/WALs/b458937b0f5f,38087,1732246733790/b458937b0f5f%2C38087%2C1732246733790.1732246734091 2024-11-22T03:38:54,097 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45483:45483),(127.0.0.1/127.0.0.1:42925:42925)] 2024-11-22T03:38:54,098 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:38:54,098 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:54,098 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,098 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:38:54,101 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:38:54,102 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:38:54,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:38:54,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:38:54,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:38:54,106 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:38:54,106 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,107 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,107 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,108 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,108 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,109 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:38:54,110 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:38:54,113 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:38:54,113 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808525, jitterRate=0.028093203902244568}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:38:54,114 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246734098Initializing all the Stores at 1732246734099 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734099Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246734099Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246734099Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246734099Cleaning up temporary data from old regions at 1732246734108 (+9 ms)Region opened successfully at 1732246734114 (+6 ms) 2024-11-22T03:38:54,114 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:38:54,117 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28116317, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:38:54,118 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:38:54,118 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:38:54,118 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:38:54,119 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:38:54,119 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:38:54,119 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:38:54,119 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:38:54,122 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:38:54,122 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:38:54,126 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:38:54,126 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:38:54,127 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:38:54,137 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:38:54,137 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:38:54,138 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:38:54,147 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:38:54,148 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:38:54,158 DEBUG 
[master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:38:54,160 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:38:54,168 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:38:54,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:38:54,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:38:54,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,179 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,38087,1732246733790, sessionid=0x101609f7f770000, setting cluster-up flag (Was=false) 2024-11-22T03:38:54,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:54,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:54,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,231 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:38:54,233 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,284 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:38:54,285 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,38087,1732246733790 2024-11-22T03:38:54,286 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:38:54,288 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:38:54,288 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:38:54,288 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
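The two WARN stack traces above come from a reflective call whose real failure ("java.io.IOException: Filesystem closed") is wrapped in an InvocationTargetException, which is why the top-level message reads "null". A generic sketch of that lookup-and-unwrap pattern follows; the method name probed here matches the trace, but this is an illustration, not the RecoverLeaseFSUtils implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ReflectiveIsFileClosed {
  // Returns null when the method is unavailable or the call fails, reporting
  // the cause rather than the InvocationTargetException wrapper.
  static Boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      System.out.println("isFileClosed not supported by " + fs.getClass());
      return null;
    } catch (InvocationTargetException e) {
      // The wrapper carries no message of its own; the interesting part is
      // the cause, e.g. "Filesystem closed" as in the log above.
      System.out.println("isFileClosed failed: " + e.getCause());
      return null;
    } catch (IllegalAccessException e) {
      return null;
    }
  }
}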
2024-11-22T03:38:54,288 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,38087,1732246733790 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:38:54,290 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,290 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(746): ClusterId : da241822-92ac-43c5-9e09-6e3f11d09da4 2024-11-22T03:38:54,290 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246764291 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:38:54,291 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,292 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:38:54,292 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:38:54,292 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:38:54,292 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:38:54,292 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:38:54,292 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:38:54,292 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:38:54,292 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246734292,5,FailOnTimeoutGroup] 2024-11-22T03:38:54,293 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246734293,5,FailOnTimeoutGroup] 2024-11-22T03:38:54,293 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,293 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,293 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:38:54,293 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,293 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
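The ChoreService entries above schedule periodic cleaners at fixed intervals (LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, ReplicationBarrierCleaner every 43200000 ms). A small sketch of the same fixed-period scheduling idea using a plain ScheduledExecutorService rather than HBase's ChoreService; the task body is a stand-in, not an actual cleaner.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicCleanerSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
    // 600000 ms matches the LogsCleaner/HFileCleaner period in the log;
    // the cleanup work itself is a placeholder.
    chores.scheduleAtFixedRate(
        () -> System.out.println("cleaner pass at " + System.currentTimeMillis()),
        0, 600_000, TimeUnit.MILLISECONDS);
  }
}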
2024-11-22T03:38:54,293 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:38:54,295 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:38:54,295 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:38:54,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:38:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:38:54,300 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:38:54,300 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
{NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf 2024-11-22T03:38:54,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:38:54,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:38:54,306 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:38:54,306 DEBUG [RS:0;b458937b0f5f:45889 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42223649, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:38:54,306 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:54,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:38:54,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:38:54,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:38:54,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:38:54,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:38:54,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:38:54,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:38:54,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:38:54,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:38:54,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740 2024-11-22T03:38:54,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740 2024-11-22T03:38:54,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:38:54,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:38:54,318 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:38:54,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:38:54,320 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:38:54,321 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:45889 2024-11-22T03:38:54,321 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776505, jitterRate=-0.012623190879821777}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:38:54,321 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:38:54,321 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:38:54,321 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:38:54,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246734306Initializing all the Stores at 1732246734307 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734307Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734307Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246734307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734308 (+1 ms)Cleaning up temporary data from old regions at 1732246734317 (+9 ms)Region opened successfully at 1732246734321 (+4 ms) 2024-11-22T03:38:54,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:38:54,321 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:38:54,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:38:54,322 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,38087,1732246733790 with port=45889, startcode=1732246733960 2024-11-22T03:38:54,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:38:54,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:38:54,322 DEBUG [RS:0;b458937b0f5f:45889 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:38:54,322 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:38:54,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246734321Disabling compacts and flushes for region at 1732246734321Disabling writes for close at 1732246734322 (+1 ms)Writing region close event to WAL at 1732246734322Closed at 1732246734322 2024-11-22T03:38:54,323 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:38:54,323 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going 
to assign meta 2024-11-22T03:38:54,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:38:54,324 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49495, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:38:54,324 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38087 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,325 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38087 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:38:54,326 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:38:54,326 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf 2024-11-22T03:38:54,326 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41491 2024-11-22T03:38:54,326 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:38:54,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:38:54,399 DEBUG [RS:0;b458937b0f5f:45889 {}] zookeeper.ZKUtil(111): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,399 WARN [RS:0;b458937b0f5f:45889 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T03:38:54,399 INFO [RS:0;b458937b0f5f:45889 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:38:54,399 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,399 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,45889,1732246733960] 2024-11-22T03:38:54,404 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:38:54,406 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:38:54,406 INFO [RS:0;b458937b0f5f:45889 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:38:54,406 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,407 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:38:54,408 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:38:54,408 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,408 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,408 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,408 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,408 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,408 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,408 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:38:54,409 DEBUG [RS:0;b458937b0f5f:45889 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:38:54,412 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,412 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,412 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,412 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,412 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,413 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45889,1732246733960-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:38:54,432 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:38:54,433 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45889,1732246733960-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,433 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:54,433 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.Replication(171): b458937b0f5f,45889,1732246733960 started 2024-11-22T03:38:54,449 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:38:54,449 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,45889,1732246733960, RpcServer on b458937b0f5f/172.17.0.3:45889, sessionid=0x101609f7f770001 2024-11-22T03:38:54,449 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:38:54,449 DEBUG [RS:0;b458937b0f5f:45889 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,449 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,45889,1732246733960' 2024-11-22T03:38:54,449 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:38:54,450 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,45889,1732246733960' 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:38:54,451 DEBUG [RS:0;b458937b0f5f:45889 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:38:54,451 INFO [RS:0;b458937b0f5f:45889 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:38:54,451 INFO [RS:0;b458937b0f5f:45889 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:38:54,476 WARN [b458937b0f5f:38087 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-22T03:38:54,554 INFO [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C45889%2C1732246733960, suffix=, logDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/oldWALs, maxLogs=32 2024-11-22T03:38:54,555 INFO [RS:0;b458937b0f5f:45889 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C45889%2C1732246733960.1732246734555 2024-11-22T03:38:54,563 INFO [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960/b458937b0f5f%2C45889%2C1732246733960.1732246734555 2024-11-22T03:38:54,564 DEBUG [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42925:42925),(127.0.0.1/127.0.0.1:45483:45483)] 2024-11-22T03:38:54,726 DEBUG [b458937b0f5f:38087 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:38:54,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,729 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,45889,1732246733960, state=OPENING 2024-11-22T03:38:54,789 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:38:54,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:38:54,800 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:38:54,800 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,45889,1732246733960}] 2024-11-22T03:38:54,800 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:54,801 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:54,954 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:38:54,956 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38567, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:38:54,961 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:38:54,961 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:38:54,963 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C45889%2C1732246733960.meta, suffix=.meta, logDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/oldWALs, maxLogs=32 2024-11-22T03:38:54,964 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C45889%2C1732246733960.meta.1732246734963.meta 2024-11-22T03:38:54,970 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960/b458937b0f5f%2C45889%2C1732246733960.meta.1732246734963.meta 2024-11-22T03:38:54,971 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45483:45483),(127.0.0.1/127.0.0.1:42925:42925)] 2024-11-22T03:38:54,972 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:38:54,973 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:38:54,973 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:38:54,973 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T03:38:54,973 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:38:54,974 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:54,974 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:38:54,974 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:38:54,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:38:54,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:38:54,976 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:38:54,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:38:54,978 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:38:54,979 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:38:54,979 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:38:54,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:38:54,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:38:54,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:54,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T03:38:54,981 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:38:54,982 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740 2024-11-22T03:38:54,983 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740 2024-11-22T03:38:54,984 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:38:54,984 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:38:54,985 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:38:54,986 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:38:54,987 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791954, jitterRate=0.007022678852081299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:38:54,987 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:38:54,988 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246734974Writing region info on filesystem at 1732246734974Initializing all the Stores at 1732246734975 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734975Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734975Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246734975Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246734975Cleaning up temporary data from old regions at 1732246734984 (+9 ms)Running coprocessor post-open hooks at 1732246734987 (+3 ms)Region opened successfully at 1732246734988 (+1 ms) 2024-11-22T03:38:54,989 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246734954 2024-11-22T03:38:54,991 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:38:54,991 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:38:54,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:38:54,992 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,45889,1732246733960, state=OPEN 2024-11-22T03:38:55,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:38:55,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:38:55,053 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,45889,1732246733960 2024-11-22T03:38:55,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:55,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:38:55,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:38:55,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,45889,1732246733960 in 253 msec 2024-11-22T03:38:55,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:38:55,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 733 msec 2024-11-22T03:38:55,060 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:38:55,060 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:38:55,061 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:38:55,061 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,45889,1732246733960, seqNum=-1] 2024-11-22T03:38:55,062 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:38:55,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44151, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:38:55,068 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 780 msec 2024-11-22T03:38:55,068 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246735068, completionTime=-1 2024-11-22T03:38:55,068 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:38:55,068 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:38:55,070 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:38:55,070 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246795070 2024-11-22T03:38:55,070 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246855070 2024-11-22T03:38:55,070 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:38:55,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,38087,1732246733790-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:55,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,38087,1732246733790-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:55,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,38087,1732246733790-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:55,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:38087, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:38:55,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:55,071 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:38:55,073 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.043sec 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,38087,1732246733790-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:38:55,075 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,38087,1732246733790-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:38:55,077 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:38:55,078 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:38:55,078 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,38087,1732246733790-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:38:55,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ea594c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:38:55,091 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,38087,-1 for getting cluster id 2024-11-22T03:38:55,091 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:38:55,092 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'da241822-92ac-43c5-9e09-6e3f11d09da4' 2024-11-22T03:38:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:38:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "da241822-92ac-43c5-9e09-6e3f11d09da4" 2024-11-22T03:38:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fff24b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:38:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,38087,-1] 2024-11-22T03:38:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:38:55,094 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:38:55,094 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48174, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:38:55,095 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718083b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:38:55,096 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:38:55,096 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,45889,1732246733960, seqNum=-1] 2024-11-22T03:38:55,097 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:38:55,098 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50980, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:38:55,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b458937b0f5f,38087,1732246733790 2024-11-22T03:38:55,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:38:55,103 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:38:55,103 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:38:55,105 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is b458937b0f5f,38087,1732246733790 2024-11-22T03:38:55,105 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2d29d2f5 2024-11-22T03:38:55,105 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:38:55,106 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48184, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:38:55,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:38:55,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-22T03:38:55,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:38:55,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-22T03:38:55,110 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:38:55,110 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:55,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-22T03:38:55,111 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:38:55,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:38:55,118 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741835_1011 (size=381) 2024-11-22T03:38:55,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741835_1011 (size=381) 2024-11-22T03:38:55,120 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c9836b67248bd41ebb2ddc55d1b69d7f, NAME => 'TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf 2024-11-22T03:38:55,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741836_1012 (size=64) 2024-11-22T03:38:55,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741836_1012 (size=64) 2024-11-22T03:38:55,127 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:55,128 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c9836b67248bd41ebb2ddc55d1b69d7f, disabling compactions & flushes 2024-11-22T03:38:55,128 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:38:55,128 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:38:55,128 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. after waiting 0 ms 2024-11-22T03:38:55,128 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:38:55,128 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 
2024-11-22T03:38:55,128 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c9836b67248bd41ebb2ddc55d1b69d7f: Waiting for close lock at 1732246735128Disabling compacts and flushes for region at 1732246735128Disabling writes for close at 1732246735128Writing region close event to WAL at 1732246735128Closed at 1732246735128 2024-11-22T03:38:55,129 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:38:55,129 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732246735129"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246735129"}]},"ts":"1732246735129"} 2024-11-22T03:38:55,132 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-22T03:38:55,133 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:38:55,133 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246735133"}]},"ts":"1732246735133"} 2024-11-22T03:38:55,135 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-22T03:38:55,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, ASSIGN}] 2024-11-22T03:38:55,137 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, ASSIGN 2024-11-22T03:38:55,138 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, ASSIGN; state=OFFLINE, location=b458937b0f5f,45889,1732246733960; forceNewPlan=false, retain=false 2024-11-22T03:38:55,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:55,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:55,289 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c9836b67248bd41ebb2ddc55d1b69d7f, regionState=OPENING, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:38:55,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, ASSIGN because future has completed 2024-11-22T03:38:55,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960}] 2024-11-22T03:38:55,451 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:38:55,451 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c9836b67248bd41ebb2ddc55d1b69d7f, NAME => 'TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:38:55,452 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,452 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:38:55,452 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,452 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,454 INFO [StoreOpener-c9836b67248bd41ebb2ddc55d1b69d7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,456 INFO [StoreOpener-c9836b67248bd41ebb2ddc55d1b69d7f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c9836b67248bd41ebb2ddc55d1b69d7f columnFamilyName info 2024-11-22T03:38:55,456 DEBUG [StoreOpener-c9836b67248bd41ebb2ddc55d1b69d7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:38:55,457 INFO [StoreOpener-c9836b67248bd41ebb2ddc55d1b69d7f-1 {}] regionserver.HStore(327): Store=c9836b67248bd41ebb2ddc55d1b69d7f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:38:55,457 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,458 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,458 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,458 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,458 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,460 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,463 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:38:55,463 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c9836b67248bd41ebb2ddc55d1b69d7f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=869632, jitterRate=0.10579435527324677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:38:55,463 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:38:55,464 DEBUG 
[RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c9836b67248bd41ebb2ddc55d1b69d7f: Running coprocessor pre-open hook at 1732246735452Writing region info on filesystem at 1732246735452Initializing all the Stores at 1732246735453 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246735453Cleaning up temporary data from old regions at 1732246735458 (+5 ms)Running coprocessor post-open hooks at 1732246735463 (+5 ms)Region opened successfully at 1732246735464 (+1 ms) 2024-11-22T03:38:55,465 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., pid=6, masterSystemTime=1732246735446 2024-11-22T03:38:55,468 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:38:55,468 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:38:55,469 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c9836b67248bd41ebb2ddc55d1b69d7f, regionState=OPEN, openSeqNum=2, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:38:55,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 because future has completed 2024-11-22T03:38:55,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:38:55,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 in 179 msec 2024-11-22T03:38:55,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:38:55,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, ASSIGN in 340 msec 2024-11-22T03:38:55,478 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:38:55,478 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732246735478"}]},"ts":"1732246735478"} 2024-11-22T03:38:55,481 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated 
tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-22T03:38:55,482 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:38:55,484 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 375 msec 2024-11-22T03:38:55,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:55,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:56,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:56,443 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:38:56,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:56,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:38:57,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:57,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:38:57,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T03:38:57,892 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:38:57,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:38:58,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:58,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:59,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:38:59,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:00,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:00,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:00,404 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:39:00,406 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-22T03:39:01,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:01,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:02,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:02,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:03,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:03,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:03,396 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:39:03,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:03,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:04,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:04,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:05,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38087 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:39:05,196 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-22T03:39:05,196 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-22T03:39:05,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:05,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:05,203 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-22T03:39:05,203 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:39:05,207 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2] 2024-11-22T03:39:05,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:05,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c9836b67248bd41ebb2ddc55d1b69d7f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:05,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/8c2e319b17064559b1b1a985f965a39c is 1080, key is row0001/info:/1732246745208/Put/seqid=0 2024-11-22T03:39:05,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741837_1013 (size=12509) 2024-11-22T03:39:05,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741837_1013 (size=12509) 2024-11-22T03:39:05,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/8c2e319b17064559b1b1a985f965a39c 2024-11-22T03:39:05,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/8c2e319b17064559b1b1a985f965a39c as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/8c2e319b17064559b1b1a985f965a39c 2024-11-22T03:39:05,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/8c2e319b17064559b1b1a985f965a39c, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T03:39:05,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] 
regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-22T03:39:05,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for c9836b67248bd41ebb2ddc55d1b69d7f in 42ms, sequenceid=11, compaction requested=false 2024-11-22T03:39:05,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:05,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:50980 deadline: 1732246755263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:05,286 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:05,287 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:05,287 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2 because the exception is null or not the one we care about 2024-11-22T03:39:06,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:06,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:07,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:07,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:08,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:08,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:09,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:09,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:10,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:10,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:11,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:11,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:12,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:12,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:13,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:13,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:14,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:14,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:15,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:15,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:15,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:15,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c9836b67248bd41ebb2ddc55d1b69d7f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-22T03:39:15,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/876a0da2428c47dca5f49f0f12e70ec0 is 1080, key is row0008/info:/1732246745223/Put/seqid=0 2024-11-22T03:39:15,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741838_1014 (size=29761) 2024-11-22T03:39:15,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741838_1014 (size=29761) 2024-11-22T03:39:15,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/876a0da2428c47dca5f49f0f12e70ec0 2024-11-22T03:39:15,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/876a0da2428c47dca5f49f0f12e70ec0 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0 2024-11-22T03:39:15,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0, entries=23, sequenceid=37, filesize=29.1 K 2024-11-22T03:39:15,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for c9836b67248bd41ebb2ddc55d1b69d7f in 26ms, sequenceid=37, compaction requested=false 2024-11-22T03:39:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-22T03:39:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0 because midkey is the same as first or last row 2024-11-22T03:39:16,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:16,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:17,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:17,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:17,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c9836b67248bd41ebb2ddc55d1b69d7f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:17,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/78d4786e53fa40ccb10687782925a31d is 1080, key is row0031/info:/1732246755357/Put/seqid=0 2024-11-22T03:39:17,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741839_1015 (size=12509) 2024-11-22T03:39:17,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741839_1015 (size=12509) 2024-11-22T03:39:17,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/78d4786e53fa40ccb10687782925a31d 2024-11-22T03:39:17,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/78d4786e53fa40ccb10687782925a31d as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/78d4786e53fa40ccb10687782925a31d 2024-11-22T03:39:17,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/78d4786e53fa40ccb10687782925a31d, entries=7, sequenceid=47, filesize=12.2 K 2024-11-22T03:39:17,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for c9836b67248bd41ebb2ddc55d1b69d7f in 34ms, sequenceid=47, compaction requested=true 2024-11-22T03:39:17,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:17,412 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,412 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,413 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0 because midkey is the same as first or last row 2024-11-22T03:39:17,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c9836b67248bd41ebb2ddc55d1b69d7f:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-22T03:39:17,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:17,413 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:17,414 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:17,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:17,415 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): c9836b67248bd41ebb2ddc55d1b69d7f/info is initiating minor compaction (all files) 2024-11-22T03:39:17,415 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c9836b67248bd41ebb2ddc55d1b69d7f/info in TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:39:17,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c9836b67248bd41ebb2ddc55d1b69d7f 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-22T03:39:17,415 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/8c2e319b17064559b1b1a985f965a39c, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/78d4786e53fa40ccb10687782925a31d] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp, totalSize=53.5 K 2024-11-22T03:39:17,415 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c2e319b17064559b1b1a985f965a39c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732246745208 2024-11-22T03:39:17,416 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 876a0da2428c47dca5f49f0f12e70ec0, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732246745223 2024-11-22T03:39:17,416 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 78d4786e53fa40ccb10687782925a31d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732246755357 2024-11-22T03:39:17,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/0c866646b3784f4a8fe6467eca75418e is 1080, key is row0038/info:/1732246757379/Put/seqid=0 
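The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings earlier in this section come from the Close-WAL-Writer thread polling lease recovery on the two WAL files after the DFS client behind them has already been shut down, so every reflective isFileClosed call throws and is retried roughly once per second. Below is a minimal sketch of that polling pattern against a plain DistributedFileSystem handle; it is illustrative only and not the RecoverLeaseFSUtils implementation (class name, timeout, and main method are assumptions).

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL file, then poll
      // isFileClosed() until it reports true or the deadline passes.
      public static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return true; // lease recovered and file closed
            }
          } catch (IOException e) {
            // This is the state the log is in: the underlying DFSClient is closed,
            // so every attempt throws and the loop just logs and retries.
            System.err.println("Failed invocation for " + wal + ": " + e);
          }
          Thread.sleep(1000L); // the WARN entries above arrive about once per second
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the log; the WAL path is passed on the command line.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33231"), conf);
        recoverLease((DistributedFileSystem) fs, new Path(args[0]), 60_000L);
      }
    }

In the run recorded here the retries cannot succeed, since the FileSystem instance backing those WALs was already closed before the close-writer task finished.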
2024-11-22T03:39:17,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741840_1016 (size=22222) 2024-11-22T03:39:17,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741840_1016 (size=22222) 2024-11-22T03:39:17,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/0c866646b3784f4a8fe6467eca75418e 2024-11-22T03:39:17,433 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c9836b67248bd41ebb2ddc55d1b69d7f#info#compaction#59 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:17,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/0c866646b3784f4a8fe6467eca75418e as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/0c866646b3784f4a8fe6467eca75418e 2024-11-22T03:39:17,434 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/9d51ff54a01c463e837cfc1ac3a65a3f is 1080, key is row0001/info:/1732246745208/Put/seqid=0 2024-11-22T03:39:17,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741841_1017 (size=44978) 2024-11-22T03:39:17,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/0c866646b3784f4a8fe6467eca75418e, entries=16, sequenceid=66, filesize=21.7 K 2024-11-22T03:39:17,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741841_1017 (size=44978) 2024-11-22T03:39:17,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=10.51 KB/10760 for c9836b67248bd41ebb2ddc55d1b69d7f in 26ms, sequenceid=66, compaction requested=false 2024-11-22T03:39:17,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:17,442 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.2 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,442 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,442 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0 because midkey is the same as first or last row 2024-11-22T03:39:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:17,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c9836b67248bd41ebb2ddc55d1b69d7f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:39:17,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/e8f5fe51ee164aeca8e62651969eecb0 is 1080, key is row0054/info:/1732246757416/Put/seqid=0 2024-11-22T03:39:17,448 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/9d51ff54a01c463e837cfc1ac3a65a3f as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f 2024-11-22T03:39:17,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741842_1018 (size=16817) 2024-11-22T03:39:17,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741842_1018 (size=16817) 2024-11-22T03:39:17,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/e8f5fe51ee164aeca8e62651969eecb0 2024-11-22T03:39:17,456 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c9836b67248bd41ebb2ddc55d1b69d7f/info of c9836b67248bd41ebb2ddc55d1b69d7f into 9d51ff54a01c463e837cfc1ac3a65a3f(size=43.9 K), total size for store is 65.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
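The "Committing .../.tmp/<hfile> as .../info/<hfile>" DEBUG lines above reflect a write-then-rename pattern: a flush or compaction writes the new HFile under the region's .tmp directory and only moves it into the column-family directory once it is complete. The following is a simplified sketch of that commit step, expressed directly against the Hadoop FileSystem API rather than HBase's HRegionFileSystem code.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitStoreFileSketch {
      // Move a fully written HFile from the region's .tmp directory into the
      // column-family directory. Because the rename stays within one HDFS
      // namespace it is a metadata-only operation, so readers never see a
      // half-written file under info/.
      public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dest = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dest)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + dest);
        }
        return dest;
      }
    }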
2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:17,456 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., storeName=c9836b67248bd41ebb2ddc55d1b69d7f/info, priority=13, startTime=1732246757413; duration=0sec 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f because midkey is the same as first or last row 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f because midkey is the same as first or last row 2024-11-22T03:39:17,456 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,457 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,457 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f because midkey is the same as first or last row 2024-11-22T03:39:17,457 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:17,457 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c9836b67248bd41ebb2ddc55d1b69d7f:info 2024-11-22T03:39:17,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/e8f5fe51ee164aeca8e62651969eecb0 as 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/e8f5fe51ee164aeca8e62651969eecb0 2024-11-22T03:39:17,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/e8f5fe51ee164aeca8e62651969eecb0, entries=11, sequenceid=80, filesize=16.4 K 2024-11-22T03:39:17,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for c9836b67248bd41ebb2ddc55d1b69d7f in 25ms, sequenceid=80, compaction requested=true 2024-11-22T03:39:17,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:17,468 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,468 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,468 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f because midkey is the same as first or last row 2024-11-22T03:39:17,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c9836b67248bd41ebb2ddc55d1b69d7f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:17,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:17,472 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:17,474 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84017 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:17,474 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): c9836b67248bd41ebb2ddc55d1b69d7f/info is initiating minor compaction (all files) 2024-11-22T03:39:17,474 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c9836b67248bd41ebb2ddc55d1b69d7f/info in TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 
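The split-policy DEBUG lines above record two separate decisions: ConstantSizeRegionSplitPolicy reports that the summed store size exceeds the current threshold (sizeToCheck=16.0 K), while StoreUtils refuses to pick a split point because the midkey of the largest store file equals the region's first or last row, which would leave one daughter region empty. To my understanding, IncreasingToUpperBoundRegionSplitPolicy grows that threshold with the cube of the number of regions of the same table on the server, capped by hbase.hregion.max.filesize; the sketch below expresses both checks with illustrative parameters, not HBase's exact code.

    import java.util.Arrays;

    public class SplitDecisionSketch {
      // Threshold check behind "Should split because region size is big enough".
      static boolean shouldSplit(long sumStoreSizeBytes, long maxFileSize,
                                 long initialSize, int regionsWithCommonTable) {
        long sizeToCheck = regionsWithCommonTable == 0
            ? maxFileSize
            : Math.min(maxFileSize, initialSize * (long) Math.pow(regionsWithCommonTable, 3));
        return sumStoreSizeBytes > sizeToCheck;
      }

      // Check behind "cannot split ... because midkey is the same as first or last row".
      static boolean canSplitAt(byte[] midKey, byte[] firstRowKey, byte[] lastRowKey) {
        return !Arrays.equals(midKey, firstRowKey) && !Arrays.equals(midKey, lastRowKey);
      }
    }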
2024-11-22T03:39:17,474 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/0c866646b3784f4a8fe6467eca75418e, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/e8f5fe51ee164aeca8e62651969eecb0] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp, totalSize=82.0 K 2024-11-22T03:39:17,474 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d51ff54a01c463e837cfc1ac3a65a3f, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732246745208 2024-11-22T03:39:17,475 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c866646b3784f4a8fe6467eca75418e, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1732246757379 2024-11-22T03:39:17,475 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting e8f5fe51ee164aeca8e62651969eecb0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732246757416 2024-11-22T03:39:17,489 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c9836b67248bd41ebb2ddc55d1b69d7f#info#compaction#61 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:17,490 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/138e528794e64fbaac8e106401cc2bea is 1080, key is row0001/info:/1732246745208/Put/seqid=0 2024-11-22T03:39:17,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741843_1019 (size=74301) 2024-11-22T03:39:17,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741843_1019 (size=74301) 2024-11-22T03:39:17,499 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/.tmp/info/138e528794e64fbaac8e106401cc2bea as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea 2024-11-22T03:39:17,505 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c9836b67248bd41ebb2ddc55d1b69d7f/info of c9836b67248bd41ebb2ddc55d1b69d7f into 138e528794e64fbaac8e106401cc2bea(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:17,505 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c9836b67248bd41ebb2ddc55d1b69d7f: 2024-11-22T03:39:17,505 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., storeName=c9836b67248bd41ebb2ddc55d1b69d7f/info, priority=13, startTime=1732246757468; duration=0sec 2024-11-22T03:39:17,506 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,506 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,506 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,506 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,506 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-22T03:39:17,506 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:39:17,507 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:17,507 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:17,507 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c9836b67248bd41ebb2ddc55d1b69d7f:info 2024-11-22T03:39:17,508 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38087 {}] assignment.AssignmentManager(1363): Split request from b458937b0f5f,45889,1732246733960, parent={ENCODED => c9836b67248bd41ebb2ddc55d1b69d7f, NAME => 'TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-22T03:39:17,512 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38087 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:17,515 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38087 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c9836b67248bd41ebb2ddc55d1b69d7f, daughterA=16360f66cda3d64794cad5bf6d13c85c, daughterB=bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:17,516 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c9836b67248bd41ebb2ddc55d1b69d7f, daughterA=16360f66cda3d64794cad5bf6d13c85c, daughterB=bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:17,516 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c9836b67248bd41ebb2ddc55d1b69d7f, daughterA=16360f66cda3d64794cad5bf6d13c85c, daughterB=bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:17,516 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c9836b67248bd41ebb2ddc55d1b69d7f, daughterA=16360f66cda3d64794cad5bf6d13c85c, daughterB=bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:17,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, UNASSIGN}] 2024-11-22T03:39:17,524 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, UNASSIGN 2024-11-22T03:39:17,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c9836b67248bd41ebb2ddc55d1b69d7f, regionState=CLOSING, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:17,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, UNASSIGN because future has completed 2024-11-22T03:39:17,528 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T03:39:17,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960}] 2024-11-22T03:39:17,685 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:17,685 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T03:39:17,686 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing c9836b67248bd41ebb2ddc55d1b69d7f, disabling compactions & flushes 2024-11-22T03:39:17,686 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:39:17,686 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 2024-11-22T03:39:17,686 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. after waiting 0 ms 2024-11-22T03:39:17,686 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 
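The entries above show the master side of the split: a split request for region c9836b67248bd41ebb2ddc55d1b69d7f at splitKey=row0062 arrives from the region server, SplitTableRegionProcedure (pid=7) takes an exclusive lock, and child procedures unassign and close the parent region before the daughters are created. In this test the split is triggered automatically by the split policy, but the same operation can be requested from a client; a short sketch using the standard Admin API follows (connection configuration is assumed to point at the test cluster).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Ask the master to split the region of this table that contains "row0062",
          // using that row as the split point (mirrors splitKey=row0062 in the log).
          admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
        }
      }
    }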
2024-11-22T03:39:17,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/8c2e319b17064559b1b1a985f965a39c, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/78d4786e53fa40ccb10687782925a31d, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/0c866646b3784f4a8fe6467eca75418e, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/e8f5fe51ee164aeca8e62651969eecb0] to archive 2024-11-22T03:39:17,688 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:39:17,690 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/8c2e319b17064559b1b1a985f965a39c to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/8c2e319b17064559b1b1a985f965a39c 2024-11-22T03:39:17,691 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/876a0da2428c47dca5f49f0f12e70ec0 2024-11-22T03:39:17,693 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/9d51ff54a01c463e837cfc1ac3a65a3f 2024-11-22T03:39:17,694 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/78d4786e53fa40ccb10687782925a31d to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/78d4786e53fa40ccb10687782925a31d 2024-11-22T03:39:17,695 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/0c866646b3784f4a8fe6467eca75418e to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/0c866646b3784f4a8fe6467eca75418e 2024-11-22T03:39:17,697 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/e8f5fe51ee164aeca8e62651969eecb0 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/e8f5fe51ee164aeca8e62651969eecb0 2024-11-22T03:39:17,703 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-22T03:39:17,704 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 
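Before the parent region closes, the HFileArchiver entries above move every compacted-away store file from the region's data directory to the mirrored path under archive/, so the files are preserved (for example for snapshot references) rather than deleted in place. A simplified sketch of that move follows; HBase's real archiver also handles name collisions and retries, which are omitted here, and the directory layout is taken from the paths in the log.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileSketch {
      // Move a store file given by its path relative to the HBase root directory
      // (e.g. "data/default/<table>/<region>/info/<hfile>") to the same relative
      // path under <root>/archive, creating parent directories as needed.
      public static void archive(FileSystem fs, Path root, String relativeDataPath) throws IOException {
        Path source = new Path(root, relativeDataPath);
        Path target = new Path(new Path(root, "archive"), relativeDataPath);
        fs.mkdirs(target.getParent());
        if (!fs.rename(source, target)) {
          throw new IOException("Could not archive " + source + " to " + target);
        }
      }
    }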
2024-11-22T03:39:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for c9836b67248bd41ebb2ddc55d1b69d7f: Waiting for close lock at 1732246757686Running coprocessor pre-close hooks at 1732246757686Disabling compacts and flushes for region at 1732246757686Disabling writes for close at 1732246757686Writing region close event to WAL at 1732246757699 (+13 ms)Running coprocessor post-close hooks at 1732246757704 (+5 ms)Closed at 1732246757704 2024-11-22T03:39:17,706 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:17,707 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c9836b67248bd41ebb2ddc55d1b69d7f, regionState=CLOSED 2024-11-22T03:39:17,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 because future has completed 2024-11-22T03:39:17,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-22T03:39:17,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure c9836b67248bd41ebb2ddc55d1b69d7f, server=b458937b0f5f,45889,1732246733960 in 183 msec 2024-11-22T03:39:17,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-22T03:39:17,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c9836b67248bd41ebb2ddc55d1b69d7f, UNASSIGN in 190 msec 2024-11-22T03:39:17,724 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:39:17,728 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=c9836b67248bd41ebb2ddc55d1b69d7f, threads=1 2024-11-22T03:39:17,731 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea for region: c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741844_1020 (size=27) 2024-11-22T03:39:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741844_1020 (size=27) 2024-11-22T03:39:17,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741845_1021 (size=27) 2024-11-22T03:39:17,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741845_1021 (size=27) 2024-11-22T03:39:17,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for 
the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T03:39:18,167 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea for region: c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:39:18,169 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region c9836b67248bd41ebb2ddc55d1b69d7f Daughter A: [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f] storefiles, Daughter B: [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f] storefiles. 2024-11-22T03:39:18,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741846_1022 (size=71) 2024-11-22T03:39:18,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741846_1022 (size=71) 2024-11-22T03:39:18,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:18,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:18,584 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:39:18,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741847_1023 (size=71) 2024-11-22T03:39:18,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741847_1023 (size=71) 2024-11-22T03:39:18,601 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:39:18,612 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-22T03:39:18,614 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-22T03:39:18,617 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732246758617"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732246758617"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732246758617"}]},"ts":"1732246758617"} 2024-11-22T03:39:18,618 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732246758617"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246758617"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732246758617"}]},"ts":"1732246758617"} 2024-11-22T03:39:18,618 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732246758617"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732246758617"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732246758617"}]},"ts":"1732246758617"} 2024-11-22T03:39:18,640 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=16360f66cda3d64794cad5bf6d13c85c, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb4100e5d98197e7f8bb8bce119bab6f, ASSIGN}] 2024-11-22T03:39:18,642 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=16360f66cda3d64794cad5bf6d13c85c, ASSIGN 
2024-11-22T03:39:18,642 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb4100e5d98197e7f8bb8bce119bab6f, ASSIGN 2024-11-22T03:39:18,643 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=16360f66cda3d64794cad5bf6d13c85c, ASSIGN; state=SPLITTING_NEW, location=b458937b0f5f,45889,1732246733960; forceNewPlan=false, retain=false 2024-11-22T03:39:18,643 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb4100e5d98197e7f8bb8bce119bab6f, ASSIGN; state=SPLITTING_NEW, location=b458937b0f5f,45889,1732246733960; forceNewPlan=false, retain=false 2024-11-22T03:39:18,794 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=16360f66cda3d64794cad5bf6d13c85c, regionState=OPENING, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:18,794 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=bb4100e5d98197e7f8bb8bce119bab6f, regionState=OPENING, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:18,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb4100e5d98197e7f8bb8bce119bab6f, ASSIGN because future has completed 2024-11-22T03:39:18,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960}] 2024-11-22T03:39:18,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=16360f66cda3d64794cad5bf6d13c85c, ASSIGN because future has completed 2024-11-22T03:39:18,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16360f66cda3d64794cad5bf6d13c85c, server=b458937b0f5f,45889,1732246733960}] 2024-11-22T03:39:18,955 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
2024-11-22T03:39:18,955 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => bb4100e5d98197e7f8bb8bce119bab6f, NAME => 'TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-22T03:39:18,956 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,956 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:39:18,956 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,956 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,958 INFO [StoreOpener-bb4100e5d98197e7f8bb8bce119bab6f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,959 INFO [StoreOpener-bb4100e5d98197e7f8bb8bce119bab6f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb4100e5d98197e7f8bb8bce119bab6f columnFamilyName info 2024-11-22T03:39:18,959 DEBUG [StoreOpener-bb4100e5d98197e7f8bb8bce119bab6f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:39:18,974 DEBUG [StoreOpener-bb4100e5d98197e7f8bb8bce119bab6f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f->hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea-top 2024-11-22T03:39:18,975 INFO [StoreOpener-bb4100e5d98197e7f8bb8bce119bab6f-1 {}] regionserver.HStore(327): Store=bb4100e5d98197e7f8bb8bce119bab6f/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:39:18,975 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,976 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,977 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,978 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,978 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,980 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,980 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened bb4100e5d98197e7f8bb8bce119bab6f; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802145, jitterRate=0.019981130957603455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:39:18,981 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:18,981 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for bb4100e5d98197e7f8bb8bce119bab6f: Running coprocessor pre-open hook at 1732246758956Writing region info on filesystem at 1732246758956Initializing all the Stores at 1732246758957 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246758957Cleaning up temporary data from old regions at 1732246758978 (+21 ms)Running coprocessor post-open hooks at 1732246758981 (+3 ms)Region opened successfully at 1732246758981 2024-11-22T03:39:18,982 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., pid=12, masterSystemTime=1732246758951 2024-11-22T03:39:18,982 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add 
compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:18,982 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T03:39:18,982 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:18,984 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:18,984 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:18,984 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:18,984 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f->hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea-top] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=72.6 K 2024-11-22T03:39:18,985 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732246745208 2024-11-22T03:39:18,986 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:18,986 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:18,986 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 
2024-11-22T03:39:18,987 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=bb4100e5d98197e7f8bb8bce119bab6f, regionState=OPEN, openSeqNum=86, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:18,987 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 16360f66cda3d64794cad5bf6d13c85c, NAME => 'TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-22T03:39:18,987 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:18,987 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:39:18,988 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:18,988 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:18,990 INFO [StoreOpener-16360f66cda3d64794cad5bf6d13c85c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:18,990 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-22T03:39:18,990 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-22T03:39:18,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-22T03:39:18,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 because future has completed 2024-11-22T03:39:18,992 INFO [StoreOpener-16360f66cda3d64794cad5bf6d13c85c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16360f66cda3d64794cad5bf6d13c85c columnFamilyName info 2024-11-22T03:39:18,992 DEBUG [StoreOpener-16360f66cda3d64794cad5bf6d13c85c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:39:18,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-22T03:39:18,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 in 196 msec 2024-11-22T03:39:18,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb4100e5d98197e7f8bb8bce119bab6f, ASSIGN in 357 msec 2024-11-22T03:39:19,010 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#63 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:19,010 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/43b4118777c644d9a28083ff2209fd23 is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:19,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/info/f5ae413b8d5b422d8962415460cdaa4a is 193, key is TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f./info:regioninfo/1732246758986/Put/seqid=0 2024-11-22T03:39:19,017 DEBUG [StoreOpener-16360f66cda3d64794cad5bf6d13c85c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f->hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea-bottom 2024-11-22T03:39:19,017 INFO [StoreOpener-16360f66cda3d64794cad5bf6d13c85c-1 {}] regionserver.HStore(327): Store=16360f66cda3d64794cad5bf6d13c85c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:39:19,017 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,018 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,020 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,020 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,020 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,022 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,023 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 16360f66cda3d64794cad5bf6d13c85c; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815433, 
jitterRate=0.03687678277492523}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:39:19,023 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:39:19,023 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 16360f66cda3d64794cad5bf6d13c85c: Running coprocessor pre-open hook at 1732246758989Writing region info on filesystem at 1732246758989Initializing all the Stores at 1732246758990 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246758990Cleaning up temporary data from old regions at 1732246759020 (+30 ms)Running coprocessor post-open hooks at 1732246759023 (+3 ms)Region opened successfully at 1732246759023 2024-11-22T03:39:19,024 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c., pid=13, masterSystemTime=1732246758951 2024-11-22T03:39:19,024 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 16360f66cda3d64794cad5bf6d13c85c:info, priority=-2147483648, current under compaction store size is 2 2024-11-22T03:39:19,024 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:19,024 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T03:39:19,025 INFO [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:39:19,025 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HStore(1541): 16360f66cda3d64794cad5bf6d13c85c/info is initiating minor compaction (all files) 2024-11-22T03:39:19,025 INFO [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 16360f66cda3d64794cad5bf6d13c85c/info in TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 
2024-11-22T03:39:19,026 INFO [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f->hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea-bottom] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/.tmp, totalSize=72.6 K 2024-11-22T03:39:19,026 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] compactions.Compactor(225): Compacting 138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732246745208 2024-11-22T03:39:19,027 DEBUG [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:39:19,027 INFO [RS_OPEN_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:39:19,028 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=16360f66cda3d64794cad5bf6d13c85c, regionState=OPEN, openSeqNum=86, regionLocation=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:19,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741849_1025 (size=9882) 2024-11-22T03:39:19,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741849_1025 (size=9882) 2024-11-22T03:39:19,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16360f66cda3d64794cad5bf6d13c85c, server=b458937b0f5f,45889,1732246733960 because future has completed 2024-11-22T03:39:19,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/info/f5ae413b8d5b422d8962415460cdaa4a 2024-11-22T03:39:19,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741848_1024 (size=8260) 2024-11-22T03:39:19,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741848_1024 (size=8260) 2024-11-22T03:39:19,042 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/43b4118777c644d9a28083ff2209fd23 as 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/43b4118777c644d9a28083ff2209fd23 2024-11-22T03:39:19,049 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-22T03:39:19,050 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into 43b4118777c644d9a28083ff2209fd23(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:19,050 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:19,050 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=15, startTime=1732246758982; duration=0sec 2024-11-22T03:39:19,050 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:19,051 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:19,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-22T03:39:19,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 16360f66cda3d64794cad5bf6d13c85c, server=b458937b0f5f,45889,1732246733960 in 238 msec 2024-11-22T03:39:19,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-22T03:39:19,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=16360f66cda3d64794cad5bf6d13c85c, ASSIGN in 411 msec 2024-11-22T03:39:19,060 INFO [RS:0;b458937b0f5f:45889-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 16360f66cda3d64794cad5bf6d13c85c#info#compaction#64 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:19,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c9836b67248bd41ebb2ddc55d1b69d7f, daughterA=16360f66cda3d64794cad5bf6d13c85c, daughterB=bb4100e5d98197e7f8bb8bce119bab6f in 1.5420 sec 2024-11-22T03:39:19,062 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/.tmp/info/068c2b5b075342aeb3b652c171928ef9 is 1080, key is row0001/info:/1732246745208/Put/seqid=0 2024-11-22T03:39:19,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/ns/79ba83d05a0447dfac1f727fdce6d513 is 43, key is default/ns:d/1732246735063/Put/seqid=0 2024-11-22T03:39:19,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741850_1026 (size=70862) 2024-11-22T03:39:19,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741850_1026 (size=70862) 2024-11-22T03:39:19,076 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/.tmp/info/068c2b5b075342aeb3b652c171928ef9 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/068c2b5b075342aeb3b652c171928ef9 2024-11-22T03:39:19,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741851_1027 (size=5153) 2024-11-22T03:39:19,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741851_1027 (size=5153) 2024-11-22T03:39:19,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/ns/79ba83d05a0447dfac1f727fdce6d513 2024-11-22T03:39:19,083 INFO [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 16360f66cda3d64794cad5bf6d13c85c/info of 16360f66cda3d64794cad5bf6d13c85c into 068c2b5b075342aeb3b652c171928ef9(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T03:39:19,083 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 16360f66cda3d64794cad5bf6d13c85c: 2024-11-22T03:39:19,083 INFO [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c., storeName=16360f66cda3d64794cad5bf6d13c85c/info, priority=15, startTime=1732246759024; duration=0sec 2024-11-22T03:39:19,083 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:19,084 DEBUG [RS:0;b458937b0f5f:45889-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 16360f66cda3d64794cad5bf6d13c85c:info 2024-11-22T03:39:19,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/table/474ee7cdd7dd4021804f53237ce4f357 is 65, key is TestLogRolling-testLogRolling/table:state/1732246735478/Put/seqid=0 2024-11-22T03:39:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741852_1028 (size=5340) 2024-11-22T03:39:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741852_1028 (size=5340) 2024-11-22T03:39:19,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/table/474ee7cdd7dd4021804f53237ce4f357 2024-11-22T03:39:19,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/info/f5ae413b8d5b422d8962415460cdaa4a as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/info/f5ae413b8d5b422d8962415460cdaa4a 2024-11-22T03:39:19,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/info/f5ae413b8d5b422d8962415460cdaa4a, entries=30, sequenceid=17, filesize=9.7 K 2024-11-22T03:39:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/ns/79ba83d05a0447dfac1f727fdce6d513 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/ns/79ba83d05a0447dfac1f727fdce6d513 2024-11-22T03:39:19,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/ns/79ba83d05a0447dfac1f727fdce6d513, entries=2, sequenceid=17, filesize=5.0 K 2024-11-22T03:39:19,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/table/474ee7cdd7dd4021804f53237ce4f357 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/table/474ee7cdd7dd4021804f53237ce4f357 2024-11-22T03:39:19,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/table/474ee7cdd7dd4021804f53237ce4f357, entries=2, sequenceid=17, filesize=5.2 K 2024-11-22T03:39:19,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 160ms, sequenceid=17, compaction requested=false 2024-11-22T03:39:19,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T03:39:19,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:19,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:19,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:50980 deadline: 1732246769444, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. is not online on b458937b0f5f,45889,1732246733960 2024-11-22T03:39:19,445 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. 
is not online on b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:19,445 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f. is not online on b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:19,445 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732246735107.c9836b67248bd41ebb2ddc55d1b69d7f., hostname=b458937b0f5f,45889,1732246733960, seqNum=2 from cache 2024-11-22T03:39:20,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:20,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:21,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:21,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:22,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:22,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:22,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:22,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:23,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:23,266 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:39:23,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:23,769 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:39:24,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:24,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:25,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:25,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:26,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:26,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:27,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:27,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:27,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T03:39:27,891 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:39:28,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:28,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:29,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:29,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:29,545 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86] 2024-11-22T03:39:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:29,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:29,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/2e029305a864467b8579f95379ef0e66 is 1080, key is row0065/info:/1732246769546/Put/seqid=0 2024-11-22T03:39:29,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741853_1029 (size=12509) 2024-11-22T03:39:29,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741853_1029 (size=12509) 2024-11-22T03:39:29,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/2e029305a864467b8579f95379ef0e66 2024-11-22T03:39:29,572 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/2e029305a864467b8579f95379ef0e66 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2e029305a864467b8579f95379ef0e66 2024-11-22T03:39:29,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2e029305a864467b8579f95379ef0e66, entries=7, sequenceid=96, filesize=12.2 K 2024-11-22T03:39:29,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for bb4100e5d98197e7f8bb8bce119bab6f in 22ms, sequenceid=96, compaction requested=false 2024-11-22T03:39:29,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:29,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:39:29,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/9631084d704a4ebdb814f3f8836ffc97 is 1080, key is row0072/info:/1732246769558/Put/seqid=0 2024-11-22T03:39:29,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741854_1030 (size=17894) 2024-11-22T03:39:29,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741854_1030 (size=17894) 2024-11-22T03:39:29,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/9631084d704a4ebdb814f3f8836ffc97 2024-11-22T03:39:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/9631084d704a4ebdb814f3f8836ffc97 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/9631084d704a4ebdb814f3f8836ffc97 2024-11-22T03:39:29,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/9631084d704a4ebdb814f3f8836ffc97, entries=12, sequenceid=111, filesize=17.5 K 2024-11-22T03:39:29,601 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for bb4100e5d98197e7f8bb8bce119bab6f in 21ms, sequenceid=111, compaction requested=true 2024-11-22T03:39:29,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:29,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:29,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:29,601 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:29,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:29,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:39:29,603 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38663 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:29,603 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:29,603 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
2024-11-22T03:39:29,603 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/43b4118777c644d9a28083ff2209fd23, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2e029305a864467b8579f95379ef0e66, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/9631084d704a4ebdb814f3f8836ffc97] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=37.8 K 2024-11-22T03:39:29,603 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 43b4118777c644d9a28083ff2209fd23, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732246757436 2024-11-22T03:39:29,604 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2e029305a864467b8579f95379ef0e66, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732246769546 2024-11-22T03:39:29,604 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9631084d704a4ebdb814f3f8836ffc97, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1732246769558 2024-11-22T03:39:29,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/a1149555180941cd88dcdb9b56d97356 is 1080, key is row0084/info:/1732246769581/Put/seqid=0 2024-11-22T03:39:29,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741855_1031 (size=16817) 2024-11-22T03:39:29,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741855_1031 (size=16817) 2024-11-22T03:39:29,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/a1149555180941cd88dcdb9b56d97356 2024-11-22T03:39:29,614 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#70 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:29,614 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/2b75a7c4d37946f9b8768d6d0b59732c is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:29,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/a1149555180941cd88dcdb9b56d97356 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/a1149555180941cd88dcdb9b56d97356 2024-11-22T03:39:29,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741856_1032 (size=28855) 2024-11-22T03:39:29,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741856_1032 (size=28855) 2024-11-22T03:39:29,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/a1149555180941cd88dcdb9b56d97356, entries=11, sequenceid=125, filesize=16.4 K 2024-11-22T03:39:29,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for bb4100e5d98197e7f8bb8bce119bab6f in 21ms, sequenceid=125, compaction requested=false 2024-11-22T03:39:29,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:29,624 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/2b75a7c4d37946f9b8768d6d0b59732c as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2b75a7c4d37946f9b8768d6d0b59732c 2024-11-22T03:39:29,630 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into 2b75a7c4d37946f9b8768d6d0b59732c(size=28.2 K), total size for store is 44.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T03:39:29,630 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:29,630 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246769601; duration=0sec 2024-11-22T03:39:29,630 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:29,630 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:30,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:30,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:31,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:31,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:31,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:31,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/ea333ce1b20f43baa86bcbbaabc54641 is 1080, key is row0095/info:/1732246769603/Put/seqid=0 2024-11-22T03:39:31,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741857_1033 (size=12515) 2024-11-22T03:39:31,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741857_1033 (size=12515) 2024-11-22T03:39:31,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/ea333ce1b20f43baa86bcbbaabc54641 2024-11-22T03:39:31,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/ea333ce1b20f43baa86bcbbaabc54641 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/ea333ce1b20f43baa86bcbbaabc54641 2024-11-22T03:39:31,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/ea333ce1b20f43baa86bcbbaabc54641, entries=7, sequenceid=136, filesize=12.2 K 2024-11-22T03:39:31,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for bb4100e5d98197e7f8bb8bce119bab6f in 23ms, sequenceid=136, compaction requested=true 2024-11-22T03:39:31,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:31,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:31,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:31,645 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:31,646 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 58187 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-22T03:39:31,646 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:31,646 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:31,646 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2b75a7c4d37946f9b8768d6d0b59732c, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/a1149555180941cd88dcdb9b56d97356, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/ea333ce1b20f43baa86bcbbaabc54641] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=56.8 K 2024-11-22T03:39:31,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:39:31,647 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b75a7c4d37946f9b8768d6d0b59732c, keycount=22, bloomtype=ROW, size=28.2 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1732246757436 2024-11-22T03:39:31,647 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1149555180941cd88dcdb9b56d97356, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1732246769581 2024-11-22T03:39:31,648 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea333ce1b20f43baa86bcbbaabc54641, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732246769603 2024-11-22T03:39:31,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/bbdf4ffda22a46238ce02400f111c938 is 1080, key is row0102/info:/1732246771622/Put/seqid=0 2024-11-22T03:39:31,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741858_1034 (size=16828) 2024-11-22T03:39:31,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741858_1034 (size=16828) 2024-11-22T03:39:31,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=150 (bloomFilter=true), 
to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/bbdf4ffda22a46238ce02400f111c938 2024-11-22T03:39:31,660 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#73 average throughput is 41.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:31,661 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/7553b5f928d84fffabada5f6c5dddc5c is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:31,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/bbdf4ffda22a46238ce02400f111c938 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/bbdf4ffda22a46238ce02400f111c938 2024-11-22T03:39:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741859_1035 (size=48385) 2024-11-22T03:39:31,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741859_1035 (size=48385) 2024-11-22T03:39:31,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/bbdf4ffda22a46238ce02400f111c938, entries=11, sequenceid=150, filesize=16.4 K 2024-11-22T03:39:31,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for bb4100e5d98197e7f8bb8bce119bab6f in 26ms, sequenceid=150, compaction requested=false 2024-11-22T03:39:31,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:31,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:31,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-22T03:39:31,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/955ff3cc96224af6958fa43d84300a27 is 1080, key is row0113/info:/1732246771648/Put/seqid=0 2024-11-22T03:39:31,678 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/7553b5f928d84fffabada5f6c5dddc5c as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7553b5f928d84fffabada5f6c5dddc5c 2024-11-22T03:39:31,685 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into 7553b5f928d84fffabada5f6c5dddc5c(size=47.3 K), total size for store is 63.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:31,685 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:31,685 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246771645; duration=0sec 2024-11-22T03:39:31,685 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:31,685 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:31,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741860_1036 (size=15750) 2024-11-22T03:39:31,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741860_1036 (size=15750) 2024-11-22T03:39:31,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/955ff3cc96224af6958fa43d84300a27 2024-11-22T03:39:31,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/955ff3cc96224af6958fa43d84300a27 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/955ff3cc96224af6958fa43d84300a27 2024-11-22T03:39:31,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/955ff3cc96224af6958fa43d84300a27, entries=10, sequenceid=163, filesize=15.4 K 2024-11-22T03:39:31,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=6.30 KB/6456 for bb4100e5d98197e7f8bb8bce119bab6f in 27ms, sequenceid=163, compaction requested=true 2024-11-22T03:39:31,700 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:31,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:31,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:31,701 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:31,702 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 80963 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:31,702 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:31,702 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:31,702 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7553b5f928d84fffabada5f6c5dddc5c, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/bbdf4ffda22a46238ce02400f111c938, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/955ff3cc96224af6958fa43d84300a27] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=79.1 K 2024-11-22T03:39:31,702 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7553b5f928d84fffabada5f6c5dddc5c, keycount=40, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732246757436 2024-11-22T03:39:31,703 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting bbdf4ffda22a46238ce02400f111c938, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732246771622 2024-11-22T03:39:31,703 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 955ff3cc96224af6958fa43d84300a27, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732246771648 2024-11-22T03:39:31,715 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#75 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:31,716 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/cbfce2ad431c4aa294f17a2a3736575d is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741861_1037 (size=71197) 2024-11-22T03:39:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741861_1037 (size=71197) 2024-11-22T03:39:31,726 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/cbfce2ad431c4aa294f17a2a3736575d as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cbfce2ad431c4aa294f17a2a3736575d 2024-11-22T03:39:31,731 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into cbfce2ad431c4aa294f17a2a3736575d(size=69.5 K), total size for store is 69.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:31,731 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:31,731 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246771701; duration=0sec 2024-11-22T03:39:31,731 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:31,731 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:32,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:32,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:33,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:33,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:33,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:33,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:33,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-22T03:39:33,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:50980 deadline: 1732246783731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:33,733 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:33,733 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:33,733 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86 because the exception is null or not the one we care about 2024-11-22T03:39:33,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/07c9c633b8f2460880e261761e11c5cd is 1080, key is row0123/info:/1732246771675/Put/seqid=0 2024-11-22T03:39:33,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741862_1038 (size=12516) 2024-11-22T03:39:33,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741862_1038 (size=12516) 2024-11-22T03:39:33,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/07c9c633b8f2460880e261761e11c5cd 2024-11-22T03:39:33,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/07c9c633b8f2460880e261761e11c5cd as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/07c9c633b8f2460880e261761e11c5cd 2024-11-22T03:39:33,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/07c9c633b8f2460880e261761e11c5cd, entries=7, sequenceid=175, filesize=12.2 K 2024-11-22T03:39:33,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for bb4100e5d98197e7f8bb8bce119bab6f in 55ms, sequenceid=175, compaction requested=false 2024-11-22T03:39:33,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:34,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:34,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:35,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:35,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:35,838 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T03:39:35,838 INFO [master/b458937b0f5f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T03:39:36,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:36,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:37,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:37,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:38,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:38,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:39,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:39,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:39,974 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-22T03:39:40,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:40,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:41,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:41,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:42,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:42,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:43,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:43,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:43,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:43,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-22T03:39:43,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/7feb503424f34b2ab1683bcffb643bb8 is 1080, key is row0130/info:/1732246773696/Put/seqid=0 2024-11-22T03:39:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741863_1039 (size=29784) 2024-11-22T03:39:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741863_1039 (size=29784) 2024-11-22T03:39:43,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/7feb503424f34b2ab1683bcffb643bb8 2024-11-22T03:39:43,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-22T03:39:43,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:50980 deadline: 1732246793858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 2024-11-22T03:39:43,859 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:43,859 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb4100e5d98197e7f8bb8bce119bab6f, server=b458937b0f5f,45889,1732246733960 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:39:43,859 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., hostname=b458937b0f5f,45889,1732246733960, seqNum=86 because the exception is null or not the one we care about 2024-11-22T03:39:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/7feb503424f34b2ab1683bcffb643bb8 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7feb503424f34b2ab1683bcffb643bb8 2024-11-22T03:39:43,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7feb503424f34b2ab1683bcffb643bb8, entries=23, sequenceid=201, filesize=29.1 K 2024-11-22T03:39:43,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=6.30 KB/6456 for bb4100e5d98197e7f8bb8bce119bab6f in 29ms, sequenceid=201, compaction requested=true 2024-11-22T03:39:43,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:43,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:43,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:43,867 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:43,869 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 113497 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:43,869 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:43,869 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
2024-11-22T03:39:43,869 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cbfce2ad431c4aa294f17a2a3736575d, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/07c9c633b8f2460880e261761e11c5cd, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7feb503424f34b2ab1683bcffb643bb8] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=110.8 K 2024-11-22T03:39:43,869 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting cbfce2ad431c4aa294f17a2a3736575d, keycount=61, bloomtype=ROW, size=69.5 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732246757436 2024-11-22T03:39:43,870 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 07c9c633b8f2460880e261761e11c5cd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732246771675 2024-11-22T03:39:43,870 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7feb503424f34b2ab1683bcffb643bb8, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732246773696 2024-11-22T03:39:43,882 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#78 average throughput is 31.13 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:43,882 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/cc6ee2c0aa4d4adbaf03a9dba01132e8 is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:43,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741864_1040 (size=103712) 2024-11-22T03:39:43,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741864_1040 (size=103712) 2024-11-22T03:39:43,891 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/cc6ee2c0aa4d4adbaf03a9dba01132e8 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cc6ee2c0aa4d4adbaf03a9dba01132e8 2024-11-22T03:39:43,896 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into cc6ee2c0aa4d4adbaf03a9dba01132e8(size=101.3 K), total size for store is 101.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:43,896 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:43,896 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246783867; duration=0sec 2024-11-22T03:39:43,896 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:43,896 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:44,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:44,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:45,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:45,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:46,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:46,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:47,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:47,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:48,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:48,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:48,528 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=64, reuseRatio=87.67% 2024-11-22T03:39:48,529 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-22T03:39:49,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:49,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:50,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:50,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:51,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:51,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:52,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:52,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:53,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:53,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:53,770 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:39:53,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:53,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:53,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/0c59311235c647f5b652984076802bef is 1080, key is row0153/info:/1732246783842/Put/seqid=0 2024-11-22T03:39:53,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741865_1041 (size=12516) 2024-11-22T03:39:53,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741865_1041 (size=12516) 2024-11-22T03:39:53,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/0c59311235c647f5b652984076802bef 2024-11-22T03:39:53,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/0c59311235c647f5b652984076802bef as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0c59311235c647f5b652984076802bef 2024-11-22T03:39:53,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0c59311235c647f5b652984076802bef, entries=7, sequenceid=212, filesize=12.2 K 2024-11-22T03:39:53,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for bb4100e5d98197e7f8bb8bce119bab6f in 22ms, sequenceid=212, compaction requested=false 2024-11-22T03:39:53,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:54,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:54,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:55,133 DEBUG [master/b458937b0f5f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-22T03:39:55,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:55,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:55,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:55,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:55,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f23f80346c224a759d37a31e72a6aeaa is 1080, key is row0160/info:/1732246793922/Put/seqid=0 2024-11-22T03:39:55,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741866_1042 (size=12516) 2024-11-22T03:39:55,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741866_1042 (size=12516) 2024-11-22T03:39:55,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f23f80346c224a759d37a31e72a6aeaa 2024-11-22T03:39:55,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f23f80346c224a759d37a31e72a6aeaa as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f23f80346c224a759d37a31e72a6aeaa 2024-11-22T03:39:55,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f23f80346c224a759d37a31e72a6aeaa, entries=7, sequenceid=222, filesize=12.2 K 2024-11-22T03:39:55,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for bb4100e5d98197e7f8bb8bce119bab6f in 22ms, sequenceid=222, compaction requested=true 2024-11-22T03:39:55,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:55,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:55,956 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:55,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:55,957 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has 
selected 3 files of size 128744 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:55,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:55,957 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:55,958 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:39:55,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:39:55,958 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cc6ee2c0aa4d4adbaf03a9dba01132e8, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0c59311235c647f5b652984076802bef, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f23f80346c224a759d37a31e72a6aeaa] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=125.7 K 2024-11-22T03:39:55,958 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc6ee2c0aa4d4adbaf03a9dba01132e8, keycount=91, bloomtype=ROW, size=101.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732246757436 2024-11-22T03:39:55,959 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c59311235c647f5b652984076802bef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732246783842 2024-11-22T03:39:55,959 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting f23f80346c224a759d37a31e72a6aeaa, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732246793922 2024-11-22T03:39:55,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f6d8f8e46cc94e548b11f629c76502b4 is 1080, key is row0167/info:/1732246795935/Put/seqid=0 2024-11-22T03:39:55,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741867_1043 (size=17906) 2024-11-22T03:39:55,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741867_1043 (size=17906) 2024-11-22T03:39:55,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=237 (bloomFilter=true), 
to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f6d8f8e46cc94e548b11f629c76502b4 2024-11-22T03:39:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f6d8f8e46cc94e548b11f629c76502b4 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f6d8f8e46cc94e548b11f629c76502b4 2024-11-22T03:39:55,978 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#82 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:55,979 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/94d4d76295ed4c759637f2f9056efceb is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:55,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f6d8f8e46cc94e548b11f629c76502b4, entries=12, sequenceid=237, filesize=17.5 K 2024-11-22T03:39:55,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for bb4100e5d98197e7f8bb8bce119bab6f in 25ms, sequenceid=237, compaction requested=false 2024-11-22T03:39:55,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:55,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:39:55,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741868_1044 (size=118910) 2024-11-22T03:39:55,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741868_1044 (size=118910) 2024-11-22T03:39:55,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/b7865e2f49184a2c8d8243b6a91d186e is 1080, key is row0179/info:/1732246795959/Put/seqid=0 2024-11-22T03:39:55,992 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/94d4d76295ed4c759637f2f9056efceb as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/94d4d76295ed4c759637f2f9056efceb 2024-11-22T03:39:55,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741869_1045 (size=17906) 2024-11-22T03:39:55,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741869_1045 (size=17906) 2024-11-22T03:39:55,999 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into 94d4d76295ed4c759637f2f9056efceb(size=116.1 K), total size for store is 133.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:55,999 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:55,999 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246795956; duration=0sec 2024-11-22T03:39:55,999 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:55,999 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:56,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed ... 11 more 2024-11-22T03:39:56,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ...
11 more 2024-11-22T03:39:56,400 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/b7865e2f49184a2c8d8243b6a91d186e 2024-11-22T03:39:56,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/b7865e2f49184a2c8d8243b6a91d186e as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b7865e2f49184a2c8d8243b6a91d186e 2024-11-22T03:39:56,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b7865e2f49184a2c8d8243b6a91d186e, entries=12, sequenceid=252, filesize=17.5 K 2024-11-22T03:39:56,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for bb4100e5d98197e7f8bb8bce119bab6f in 436ms, sequenceid=252, compaction requested=true 2024-11-22T03:39:56,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:56,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:56,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:56,421 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:56,422 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 154722 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:56,422 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:56,422 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
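[Editor's note] The ExploringCompactionPolicy records above ("selected 3 files of size 154722 starting at candidate #0 after considering 1 permutations with 1 in ratio") come from HBase's size-based selection of minor compactions. As a rough illustration only, and not this test's exact configuration: the documented rule is that no file in a candidate set may be larger than hbase.hstore.compaction.ratio (default 1.2) times the combined size of the other files in the set. The minimal sketch below applies that arithmetic to byte sizes taken from this log; in this run the selected sets also contain one file far larger than the other two, so other parts of the policy decide the final pick.

import java.util.List;

// Simplified sketch of the "files in ratio" test used by HBase's size-based
// compaction selection (hbase.hstore.compaction.ratio, default 1.2). This is
// an illustration only; ExploringCompactionPolicy additionally enforces
// minimum/maximum file counts, an off-peak ratio and a fallback for stuck stores.
public class CompactionRatioSketch {

  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // No single file may be larger than ratio * (sum of the other files).
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three flush-sized files like those in this log pass the plain ratio test.
    System.out.println(filesInRatio(List.of(12_516L, 12_521L, 17_906L), 1.2)); // true
    // The 176,623-byte selection from this log (141.7 K + 12.2 K + 18.6 K) fails
    // the plain 1.2 ratio test, so other policy rules made the final call here.
    System.out.println(filesInRatio(List.of(145_089L, 12_521L, 19_013L), 1.2)); // false
  }
}

The point of the ratio bound is to avoid repeatedly rewriting one large file just to absorb tiny flushes; when the bound cannot be met, the policy weighs the remaining permutations and its other limits before committing to a selection.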
2024-11-22T03:39:56,422 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/94d4d76295ed4c759637f2f9056efceb, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f6d8f8e46cc94e548b11f629c76502b4, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b7865e2f49184a2c8d8243b6a91d186e] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=151.1 K 2024-11-22T03:39:56,422 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:39:56,423 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 94d4d76295ed4c759637f2f9056efceb, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732246757436 2024-11-22T03:39:56,423 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6d8f8e46cc94e548b11f629c76502b4, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732246795935 2024-11-22T03:39:56,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,423 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7865e2f49184a2c8d8243b6a91d186e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732246795959 2024-11-22T03:39:56,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,436 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#84 average throughput is 44.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:56,437 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/b5091cf819d5434c91e693d7ece73d80 is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:56,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741870_1046 (size=145089) 2024-11-22T03:39:56,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741870_1046 (size=145089) 2024-11-22T03:39:56,447 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/b5091cf819d5434c91e693d7ece73d80 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b5091cf819d5434c91e693d7ece73d80 2024-11-22T03:39:56,453 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into b5091cf819d5434c91e693d7ece73d80(size=141.7 K), total size for store is 141.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:39:56,453 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:56,453 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246796420; duration=0sec 2024-11-22T03:39:56,453 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:56,453 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:56,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:56,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:39:57,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:39:57,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ...
11 more 2024-11-22T03:39:58,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:58,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:39:58,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/842f5a2083af4ac38a4365e3969b317e is 1080, key is row0191/info:/1732246795986/Put/seqid=0 2024-11-22T03:39:58,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741871_1047 (size=12521) 2024-11-22T03:39:58,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741871_1047 (size=12521) 2024-11-22T03:39:58,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/842f5a2083af4ac38a4365e3969b317e 2024-11-22T03:39:58,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/842f5a2083af4ac38a4365e3969b317e as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/842f5a2083af4ac38a4365e3969b317e 2024-11-22T03:39:58,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/842f5a2083af4ac38a4365e3969b317e, entries=7, sequenceid=264, filesize=12.2 K 2024-11-22T03:39:58,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for bb4100e5d98197e7f8bb8bce119bab6f in 22ms, sequenceid=264, compaction requested=false 2024-11-22T03:39:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:58,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T03:39:58,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/342e4763681749bfabc94fb2050a29a0 is 1080, key is row0198/info:/1732246798009/Put/seqid=0 2024-11-22T03:39:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to 
blk_1073741872_1048 (size=19013) 2024-11-22T03:39:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741872_1048 (size=19013) 2024-11-22T03:39:58,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/342e4763681749bfabc94fb2050a29a0 2024-11-22T03:39:58,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/342e4763681749bfabc94fb2050a29a0 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/342e4763681749bfabc94fb2050a29a0 2024-11-22T03:39:58,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/342e4763681749bfabc94fb2050a29a0, entries=13, sequenceid=280, filesize=18.6 K 2024-11-22T03:39:58,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for bb4100e5d98197e7f8bb8bce119bab6f in 23ms, sequenceid=280, compaction requested=true 2024-11-22T03:39:58,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:58,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:39:58,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:58,054 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:39:58,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:39:58,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T03:39:58,055 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:39:58,055 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:39:58,055 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
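[Editor's note] Every flushed HFile block above is reported twice by the Block report processor, once per datanode (127.0.0.1:33755 and 127.0.0.1:45927), so the mini-cluster keeps two replicas of each block. Below is a hedged sketch of how the same information could be read back through the standard Hadoop FileSystem API; the NameNode URI and the store-file path are placeholders copied from this log, not a prescribed procedure.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: print where each block of a store file is replicated. The NameNode
// URI and the HFile path are placeholders taken from this log.
public class BlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41491"), conf)) {
      Path hfile = new Path("/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf"
          + "/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f"
          + "/info/342e4763681749bfabc94fb2050a29a0");
      FileStatus status = fs.getFileStatus(hfile);
      for (BlockLocation block : fs.getFileBlockLocations(status, 0, status.getLen())) {
        // getNames() lists datanode addresses such as 127.0.0.1:33755 and 127.0.0.1:45927.
        System.out.printf("offset=%d length=%d replicas=%s%n",
            block.getOffset(), block.getLength(), String.join(",", block.getNames()));
      }
    }
  }
}

Note that this particular file may already have been merged away by a later compaction; any store file path that currently exists works the same way.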
2024-11-22T03:39:58,056 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b5091cf819d5434c91e693d7ece73d80, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/842f5a2083af4ac38a4365e3969b317e, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/342e4763681749bfabc94fb2050a29a0] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=172.5 K 2024-11-22T03:39:58,056 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting b5091cf819d5434c91e693d7ece73d80, keycount=129, bloomtype=ROW, size=141.7 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732246757436 2024-11-22T03:39:58,056 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 842f5a2083af4ac38a4365e3969b317e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732246795986 2024-11-22T03:39:58,057 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 342e4763681749bfabc94fb2050a29a0, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732246798009 2024-11-22T03:39:58,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/be1a20b301e747f8a859794233546dc0 is 1080, key is row0211/info:/1732246798032/Put/seqid=0 2024-11-22T03:39:58,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741873_1049 (size=19013) 2024-11-22T03:39:58,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741873_1049 (size=19013) 2024-11-22T03:39:58,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/be1a20b301e747f8a859794233546dc0 2024-11-22T03:39:58,070 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#88 average throughput is 50.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:39:58,070 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f81a16f8ab054df196973e864b44b456 is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:39:58,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/be1a20b301e747f8a859794233546dc0 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/be1a20b301e747f8a859794233546dc0 2024-11-22T03:39:58,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741874_1050 (size=166773) 2024-11-22T03:39:58,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741874_1050 (size=166773) 2024-11-22T03:39:58,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/be1a20b301e747f8a859794233546dc0, entries=13, sequenceid=296, filesize=18.6 K 2024-11-22T03:39:58,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for bb4100e5d98197e7f8bb8bce119bab6f in 22ms, sequenceid=296, compaction requested=false 2024-11-22T03:39:58,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:58,077 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/f81a16f8ab054df196973e864b44b456 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f81a16f8ab054df196973e864b44b456 2024-11-22T03:39:58,083 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into f81a16f8ab054df196973e864b44b456(size=162.9 K), total size for store is 181.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
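[Editor's note] The recurring Close-WAL-Writer-0 warnings (the next one appears a few records below) are produced when RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection and the probe fails; in every trace here the wrapped cause is "java.io.IOException: Filesystem closed", i.e. the DFS client had already been shut down. The following is a minimal sketch of that reflective call-and-unwrap pattern, which is also why Method.invoke and GeneratedMethodAccessor frames appear in the traces; it is an illustration, not the utility's actual code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustration of a reflective isFileClosed() probe. Reflection lets the same
// code run against FileSystem implementations that do not expose the method.
public final class IsFileClosedSketch {

  // Returns TRUE/FALSE when the probe works, null when it is unavailable or fails.
  static Boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return null; // this FileSystem has no isFileClosed(); a caller would fall back to plain retries
    } catch (IllegalAccessException | InvocationTargetException e) {
      // An InvocationTargetException wraps whatever isFileClosed() itself threw,
      // e.g. java.io.IOException: Filesystem closed once the DFS client is shut down.
      Throwable cause = (e instanceof InvocationTargetException) ? e.getCause() : e;
      System.err.println("Failed invocation for " + wal + ": " + cause);
      return null;
    }
  }

  public static void main(String[] args) throws IOException {
    // LocalFileSystem does not expose isFileClosed(), so this is expected to print null.
    FileSystem local = FileSystem.getLocal(new Configuration());
    System.out.println(isFileClosed(local, new Path("/tmp/example.wal")));
  }
}

The WAL paths in these warnings point at port 33231 while the active test writes to port 41491, which suggests the retries belong to an HDFS instance that has already been torn down rather than to the region under test.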
2024-11-22T03:39:58,083 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:39:58,083 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246798054; duration=0sec 2024-11-22T03:39:58,083 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:39:58,083 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:39:58,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:39:58,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more 2024-11-22T03:39:59,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more 2024-11-22T03:39:59,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ...
11 more 2024-11-22T03:40:00,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:40:00,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:40:00,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/51e5c27ae9234f5d8d1ad5ca02847335 is 1080, key is row0224/info:/1732246798056/Put/seqid=0 2024-11-22T03:40:00,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741875_1051 (size=12523) 2024-11-22T03:40:00,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741875_1051 (size=12523) 2024-11-22T03:40:00,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/51e5c27ae9234f5d8d1ad5ca02847335 2024-11-22T03:40:00,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/51e5c27ae9234f5d8d1ad5ca02847335 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/51e5c27ae9234f5d8d1ad5ca02847335 2024-11-22T03:40:00,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/51e5c27ae9234f5d8d1ad5ca02847335, entries=7, sequenceid=307, filesize=12.2 K 2024-11-22T03:40:00,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for bb4100e5d98197e7f8bb8bce119bab6f in 24ms, sequenceid=307, compaction requested=true 2024-11-22T03:40:00,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:40:00,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:40:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:40:00,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:40:00,101 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:40:00,102 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T03:40:00,103 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 198309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:40:00,103 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:40:00,103 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:40:00,103 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f81a16f8ab054df196973e864b44b456, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/be1a20b301e747f8a859794233546dc0, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/51e5c27ae9234f5d8d1ad5ca02847335] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=193.7 K 2024-11-22T03:40:00,103 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting f81a16f8ab054df196973e864b44b456, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732246757436 2024-11-22T03:40:00,104 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting be1a20b301e747f8a859794233546dc0, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732246798032 2024-11-22T03:40:00,104 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 51e5c27ae9234f5d8d1ad5ca02847335, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732246798056 2024-11-22T03:40:00,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/965d21c4a8fc43cfaeae3ac35bd33a04 is 1080, key is row0231/info:/1732246800078/Put/seqid=0 2024-11-22T03:40:00,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741876_1052 (size=19013) 2024-11-22T03:40:00,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741876_1052 (size=19013) 2024-11-22T03:40:00,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=323 (bloomFilter=true), 
to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/965d21c4a8fc43cfaeae3ac35bd33a04 2024-11-22T03:40:00,118 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#91 average throughput is 43.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:40:00,119 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/eab381dca4d04ea3931adf7de808dddd is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:40:00,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/965d21c4a8fc43cfaeae3ac35bd33a04 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/965d21c4a8fc43cfaeae3ac35bd33a04 2024-11-22T03:40:00,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741877_1053 (size=188463) 2024-11-22T03:40:00,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741877_1053 (size=188463) 2024-11-22T03:40:00,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/965d21c4a8fc43cfaeae3ac35bd33a04, entries=13, sequenceid=323, filesize=18.6 K 2024-11-22T03:40:00,127 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/eab381dca4d04ea3931adf7de808dddd as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/eab381dca4d04ea3931adf7de808dddd 2024-11-22T03:40:00,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for bb4100e5d98197e7f8bb8bce119bab6f in 24ms, sequenceid=323, compaction requested=false 2024-11-22T03:40:00,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:40:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45889 {}] regionserver.HRegion(8855): Flush requested on bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:40:00,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb4100e5d98197e7f8bb8bce119bab6f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T03:40:00,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/8e456b9e1f43486180cd5c7f89c5ee0d is 1080, key is row0244/info:/1732246800103/Put/seqid=0 2024-11-22T03:40:00,132 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into eab381dca4d04ea3931adf7de808dddd(size=184.0 K), total size for store is 202.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:40:00,132 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:40:00,132 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246800101; duration=0sec 2024-11-22T03:40:00,132 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:40:00,132 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:40:00,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741878_1054 (size=19013) 2024-11-22T03:40:00,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741878_1054 (size=19013) 2024-11-22T03:40:00,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/8e456b9e1f43486180cd5c7f89c5ee0d 2024-11-22T03:40:00,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/8e456b9e1f43486180cd5c7f89c5ee0d as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/8e456b9e1f43486180cd5c7f89c5ee0d 2024-11-22T03:40:00,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/8e456b9e1f43486180cd5c7f89c5ee0d, entries=13, sequenceid=339, filesize=18.6 K 2024-11-22T03:40:00,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for bb4100e5d98197e7f8bb8bce119bab6f in 17ms, sequenceid=339, compaction requested=true 2024-11-22T03:40:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:40:00,144 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb4100e5d98197e7f8bb8bce119bab6f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:40:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:40:00,144 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:40:00,145 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 226489 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:40:00,145 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1541): bb4100e5d98197e7f8bb8bce119bab6f/info is initiating minor compaction (all files) 2024-11-22T03:40:00,145 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb4100e5d98197e7f8bb8bce119bab6f/info in TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:40:00,145 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/eab381dca4d04ea3931adf7de808dddd, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/965d21c4a8fc43cfaeae3ac35bd33a04, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/8e456b9e1f43486180cd5c7f89c5ee0d] into tmpdir=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp, totalSize=221.2 K 2024-11-22T03:40:00,146 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting eab381dca4d04ea3931adf7de808dddd, keycount=169, bloomtype=ROW, size=184.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732246757436 2024-11-22T03:40:00,146 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 965d21c4a8fc43cfaeae3ac35bd33a04, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732246800078 2024-11-22T03:40:00,147 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e456b9e1f43486180cd5c7f89c5ee0d, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732246800103 2024-11-22T03:40:00,156 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb4100e5d98197e7f8bb8bce119bab6f#info#compaction#93 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:40:00,157 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/0622eeff44bf471b9d0c0eebe050a6e2 is 1080, key is row0062/info:/1732246757436/Put/seqid=0 2024-11-22T03:40:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741879_1055 (size=216692) 2024-11-22T03:40:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741879_1055 (size=216692) 2024-11-22T03:40:00,163 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/.tmp/info/0622eeff44bf471b9d0c0eebe050a6e2 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0622eeff44bf471b9d0c0eebe050a6e2 2024-11-22T03:40:00,168 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb4100e5d98197e7f8bb8bce119bab6f/info of bb4100e5d98197e7f8bb8bce119bab6f into 0622eeff44bf471b9d0c0eebe050a6e2(size=211.6 K), total size for store is 211.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:40:00,168 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:40:00,168 INFO [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., storeName=bb4100e5d98197e7f8bb8bce119bab6f/info, priority=13, startTime=1732246800144; duration=0sec 2024-11-22T03:40:00,168 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:40:00,168 DEBUG [RS:0;b458937b0f5f:45889-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb4100e5d98197e7f8bb8bce119bab6f:info 2024-11-22T03:40:00,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:40:00,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:40:01,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:40:01,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:02,128 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-22T03:40:02,129 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C45889%2C1732246733960.1732246802129 2024-11-22T03:40:02,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,140 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,140 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,140 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960/b458937b0f5f%2C45889%2C1732246733960.1732246734555 with entries=319, filesize=310.54 KB; new WAL /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960/b458937b0f5f%2C45889%2C1732246733960.1732246802129 2024-11-22T03:40:02,141 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45483:45483),(127.0.0.1/127.0.0.1:42925:42925)] 2024-11-22T03:40:02,141 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960/b458937b0f5f%2C45889%2C1732246733960.1732246734555 is not closed yet, will try archiving it next time 2024-11-22T03:40:02,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741833_1009 (size=317998) 2024-11-22T03:40:02,142 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741833_1009 (size=317998) 2024-11-22T03:40:02,143 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/WALs/b458937b0f5f,45889,1732246733960/b458937b0f5f%2C45889%2C1732246733960.1732246734555 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/oldWALs/b458937b0f5f%2C45889%2C1732246733960.1732246734555 2024-11-22T03:40:02,146 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for bb4100e5d98197e7f8bb8bce119bab6f: 2024-11-22T03:40:02,146 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 16360f66cda3d64794cad5bf6d13c85c: 2024-11-22T03:40:02,146 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-22T03:40:02,153 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/info/a79d0cee39f7414884ca5b3b3c2bbb88 is 186, key is TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c./info:regioninfo/1732246759028/Put/seqid=0 2024-11-22T03:40:02,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741881_1057 (size=6153) 2024-11-22T03:40:02,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741881_1057 (size=6153) 2024-11-22T03:40:02,158 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/info/a79d0cee39f7414884ca5b3b3c2bbb88 2024-11-22T03:40:02,163 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/.tmp/info/a79d0cee39f7414884ca5b3b3c2bbb88 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/info/a79d0cee39f7414884ca5b3b3c2bbb88 2024-11-22T03:40:02,168 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/info/a79d0cee39f7414884ca5b3b3c2bbb88, entries=5, sequenceid=21, filesize=6.0 K 2024-11-22T03:40:02,169 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-11-22T03:40:02,169 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T03:40:02,170 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:40:02,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:40:02,170 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:40:02,170 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:40:02,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:02,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:02,170 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T03:40:02,170 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:40:02,170 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1321148526, stopped=false 2024-11-22T03:40:02,170 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,38087,1732246733790 2024-11-22T03:40:02,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:40:02,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:40:02,229 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:40:02,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:02,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:02,229 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:40:02,229 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:40:02,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:02,230 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,45889,1732246733960' ***** 2024-11-22T03:40:02,230 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:40:02,230 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:40:02,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:40:02,231 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:40:02,231 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:40:02,232 INFO [RS:0;b458937b0f5f:45889 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:40:02,232 INFO [RS:0;b458937b0f5f:45889 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-22T03:40:02,232 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(3091): Received CLOSE for bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:40:02,232 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(3091): Received CLOSE for 16360f66cda3d64794cad5bf6d13c85c 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,45889,1732246733960 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:40:02,233 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bb4100e5d98197e7f8bb8bce119bab6f, disabling compactions & flushes 2024-11-22T03:40:02,233 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:45889. 2024-11-22T03:40:02,233 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:40:02,233 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. after waiting 0 ms 2024-11-22T03:40:02,233 DEBUG [RS:0;b458937b0f5f:45889 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:40:02,233 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
2024-11-22T03:40:02,233 DEBUG [RS:0;b458937b0f5f:45889 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:40:02,233 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:40:02,234 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-22T03:40:02,234 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1325): Online Regions={bb4100e5d98197e7f8bb8bce119bab6f=TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f., 16360f66cda3d64794cad5bf6d13c85c=TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:40:02,234 DEBUG [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 16360f66cda3d64794cad5bf6d13c85c, bb4100e5d98197e7f8bb8bce119bab6f 2024-11-22T03:40:02,234 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:40:02,234 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:40:02,234 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:40:02,234 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:40:02,234 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:40:02,234 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f->hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea-top, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/43b4118777c644d9a28083ff2209fd23, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2e029305a864467b8579f95379ef0e66, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2b75a7c4d37946f9b8768d6d0b59732c, 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/9631084d704a4ebdb814f3f8836ffc97, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/a1149555180941cd88dcdb9b56d97356, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7553b5f928d84fffabada5f6c5dddc5c, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/ea333ce1b20f43baa86bcbbaabc54641, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/bbdf4ffda22a46238ce02400f111c938, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cbfce2ad431c4aa294f17a2a3736575d, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/955ff3cc96224af6958fa43d84300a27, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/07c9c633b8f2460880e261761e11c5cd, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cc6ee2c0aa4d4adbaf03a9dba01132e8, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7feb503424f34b2ab1683bcffb643bb8, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0c59311235c647f5b652984076802bef, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/94d4d76295ed4c759637f2f9056efceb, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f23f80346c224a759d37a31e72a6aeaa, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f6d8f8e46cc94e548b11f629c76502b4, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b5091cf819d5434c91e693d7ece73d80, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b7865e2f49184a2c8d8243b6a91d186e, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/842f5a2083af4ac38a4365e3969b317e, 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f81a16f8ab054df196973e864b44b456, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/342e4763681749bfabc94fb2050a29a0, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/be1a20b301e747f8a859794233546dc0, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/eab381dca4d04ea3931adf7de808dddd, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/51e5c27ae9234f5d8d1ad5ca02847335, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/965d21c4a8fc43cfaeae3ac35bd33a04, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/8e456b9e1f43486180cd5c7f89c5ee0d] to archive 2024-11-22T03:40:02,236 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:40:02,238 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:40:02,239 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/43b4118777c644d9a28083ff2209fd23 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/43b4118777c644d9a28083ff2209fd23 2024-11-22T03:40:02,240 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-22T03:40:02,240 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:40:02,240 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
2024-11-22T03:40:02,240 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246802234Running coprocessor pre-close hooks at 1732246802234Disabling compacts and flushes for region at 1732246802234Disabling writes for close at 1732246802234Writing region close event to WAL at 1732246802236 (+2 ms)Running coprocessor post-close hooks at 1732246802240 (+4 ms)Closed at 1732246802240 2024-11-22T03:40:02,241 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:40:02,241 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2e029305a864467b8579f95379ef0e66 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2e029305a864467b8579f95379ef0e66 2024-11-22T03:40:02,242 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2b75a7c4d37946f9b8768d6d0b59732c to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/2b75a7c4d37946f9b8768d6d0b59732c 2024-11-22T03:40:02,243 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/9631084d704a4ebdb814f3f8836ffc97 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/9631084d704a4ebdb814f3f8836ffc97 2024-11-22T03:40:02,244 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/a1149555180941cd88dcdb9b56d97356 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/a1149555180941cd88dcdb9b56d97356 2024-11-22T03:40:02,245 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7553b5f928d84fffabada5f6c5dddc5c to 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7553b5f928d84fffabada5f6c5dddc5c 2024-11-22T03:40:02,246 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/ea333ce1b20f43baa86bcbbaabc54641 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/ea333ce1b20f43baa86bcbbaabc54641 2024-11-22T03:40:02,247 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/bbdf4ffda22a46238ce02400f111c938 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/bbdf4ffda22a46238ce02400f111c938 2024-11-22T03:40:02,248 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cbfce2ad431c4aa294f17a2a3736575d to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cbfce2ad431c4aa294f17a2a3736575d 2024-11-22T03:40:02,248 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/955ff3cc96224af6958fa43d84300a27 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/955ff3cc96224af6958fa43d84300a27 2024-11-22T03:40:02,249 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/07c9c633b8f2460880e261761e11c5cd to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/07c9c633b8f2460880e261761e11c5cd 2024-11-22T03:40:02,250 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cc6ee2c0aa4d4adbaf03a9dba01132e8 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/cc6ee2c0aa4d4adbaf03a9dba01132e8 2024-11-22T03:40:02,251 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7feb503424f34b2ab1683bcffb643bb8 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/7feb503424f34b2ab1683bcffb643bb8 2024-11-22T03:40:02,252 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0c59311235c647f5b652984076802bef to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/0c59311235c647f5b652984076802bef 2024-11-22T03:40:02,253 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/94d4d76295ed4c759637f2f9056efceb to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/94d4d76295ed4c759637f2f9056efceb 2024-11-22T03:40:02,254 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f23f80346c224a759d37a31e72a6aeaa to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f23f80346c224a759d37a31e72a6aeaa 2024-11-22T03:40:02,255 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f6d8f8e46cc94e548b11f629c76502b4 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f6d8f8e46cc94e548b11f629c76502b4 2024-11-22T03:40:02,255 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b5091cf819d5434c91e693d7ece73d80 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b5091cf819d5434c91e693d7ece73d80 2024-11-22T03:40:02,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:02,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:02,256 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b7865e2f49184a2c8d8243b6a91d186e to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/b7865e2f49184a2c8d8243b6a91d186e 2024-11-22T03:40:02,257 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/842f5a2083af4ac38a4365e3969b317e to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/842f5a2083af4ac38a4365e3969b317e 2024-11-22T03:40:02,258 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f81a16f8ab054df196973e864b44b456 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/f81a16f8ab054df196973e864b44b456 2024-11-22T03:40:02,259 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/342e4763681749bfabc94fb2050a29a0 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/342e4763681749bfabc94fb2050a29a0 2024-11-22T03:40:02,260 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/be1a20b301e747f8a859794233546dc0 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/be1a20b301e747f8a859794233546dc0 2024-11-22T03:40:02,261 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/eab381dca4d04ea3931adf7de808dddd to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/eab381dca4d04ea3931adf7de808dddd 2024-11-22T03:40:02,261 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/51e5c27ae9234f5d8d1ad5ca02847335 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/51e5c27ae9234f5d8d1ad5ca02847335 2024-11-22T03:40:02,262 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/965d21c4a8fc43cfaeae3ac35bd33a04 to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/965d21c4a8fc43cfaeae3ac35bd33a04 2024-11-22T03:40:02,263 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/8e456b9e1f43486180cd5c7f89c5ee0d to 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/info/8e456b9e1f43486180cd5c7f89c5ee0d 2024-11-22T03:40:02,263 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b458937b0f5f:38087 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T03:40:02,264 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [43b4118777c644d9a28083ff2209fd23=8260, 2e029305a864467b8579f95379ef0e66=12509, 2b75a7c4d37946f9b8768d6d0b59732c=28855, 9631084d704a4ebdb814f3f8836ffc97=17894, a1149555180941cd88dcdb9b56d97356=16817, 7553b5f928d84fffabada5f6c5dddc5c=48385, ea333ce1b20f43baa86bcbbaabc54641=12515, bbdf4ffda22a46238ce02400f111c938=16828, cbfce2ad431c4aa294f17a2a3736575d=71197, 955ff3cc96224af6958fa43d84300a27=15750, 07c9c633b8f2460880e261761e11c5cd=12516, cc6ee2c0aa4d4adbaf03a9dba01132e8=103712, 7feb503424f34b2ab1683bcffb643bb8=29784, 0c59311235c647f5b652984076802bef=12516, 94d4d76295ed4c759637f2f9056efceb=118910, f23f80346c224a759d37a31e72a6aeaa=12516, f6d8f8e46cc94e548b11f629c76502b4=17906, b5091cf819d5434c91e693d7ece73d80=145089, b7865e2f49184a2c8d8243b6a91d186e=17906, 842f5a2083af4ac38a4365e3969b317e=12521, f81a16f8ab054df196973e864b44b456=166773, 342e4763681749bfabc94fb2050a29a0=19013, be1a20b301e747f8a859794233546dc0=19013, eab381dca4d04ea3931adf7de808dddd=188463, 51e5c27ae9234f5d8d1ad5ca02847335=12523, 965d21c4a8fc43cfaeae3ac35bd33a04=19013, 8e456b9e1f43486180cd5c7f89c5ee0d=19013] 2024-11-22T03:40:02,267 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/bb4100e5d98197e7f8bb8bce119bab6f/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=85 2024-11-22T03:40:02,267 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 2024-11-22T03:40:02,268 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bb4100e5d98197e7f8bb8bce119bab6f: Waiting for close lock at 1732246802233Running coprocessor pre-close hooks at 1732246802233Disabling compacts and flushes for region at 1732246802233Disabling writes for close at 1732246802233Writing region close event to WAL at 1732246802264 (+31 ms)Running coprocessor post-close hooks at 1732246802267 (+3 ms)Closed at 1732246802267 2024-11-22T03:40:02,268 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732246757512.bb4100e5d98197e7f8bb8bce119bab6f. 
2024-11-22T03:40:02,268 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 16360f66cda3d64794cad5bf6d13c85c, disabling compactions & flushes 2024-11-22T03:40:02,268 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:40:02,268 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:40:02,268 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. after waiting 0 ms 2024-11-22T03:40:02,268 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:40:02,268 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f->hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/c9836b67248bd41ebb2ddc55d1b69d7f/info/138e528794e64fbaac8e106401cc2bea-bottom] to archive 2024-11-22T03:40:02,269 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:40:02,270 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f to hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/archive/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/info/138e528794e64fbaac8e106401cc2bea.c9836b67248bd41ebb2ddc55d1b69d7f 2024-11-22T03:40:02,270 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-22T03:40:02,273 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/data/default/TestLogRolling-testLogRolling/16360f66cda3d64794cad5bf6d13c85c/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-22T03:40:02,274 INFO [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 
2024-11-22T03:40:02,274 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 16360f66cda3d64794cad5bf6d13c85c: Waiting for close lock at 1732246802268Running coprocessor pre-close hooks at 1732246802268Disabling compacts and flushes for region at 1732246802268Disabling writes for close at 1732246802268Writing region close event to WAL at 1732246802271 (+3 ms)Running coprocessor post-close hooks at 1732246802274 (+3 ms)Closed at 1732246802274 2024-11-22T03:40:02,274 DEBUG [RS_CLOSE_REGION-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732246757512.16360f66cda3d64794cad5bf6d13c85c. 2024-11-22T03:40:02,416 INFO [regionserver/b458937b0f5f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:40:02,434 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,45889,1732246733960; all regions closed. 2024-11-22T03:40:02,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741834_1010 (size=8107) 2024-11-22T03:40:02,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741834_1010 (size=8107) 2024-11-22T03:40:02,439 DEBUG [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/oldWALs 2024-11-22T03:40:02,439 INFO [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C45889%2C1732246733960.meta:.meta(num 1732246734963) 2024-11-22T03:40:02,440 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,440 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,440 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,440 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,440 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741880_1056 (size=778) 2024-11-22T03:40:02,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741880_1056 (size=778) 2024-11-22T03:40:02,444 DEBUG [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/oldWALs 2024-11-22T03:40:02,444 INFO [RS:0;b458937b0f5f:45889 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C45889%2C1732246733960:(num 1732246802129) 2024-11-22T03:40:02,444 DEBUG [RS:0;b458937b0f5f:45889 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:02,444 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:40:02,444 INFO [RS:0;b458937b0f5f:45889 {}] hbase.HBaseServerBase(438): Shutdown chores and 
chore service 2024-11-22T03:40:02,444 INFO [RS:0;b458937b0f5f:45889 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:40:02,444 INFO [RS:0;b458937b0f5f:45889 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:40:02,444 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:40:02,444 INFO [RS:0;b458937b0f5f:45889 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45889 2024-11-22T03:40:02,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,45889,1732246733960 2024-11-22T03:40:02,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:40:02,481 INFO [RS:0;b458937b0f5f:45889 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:40:02,491 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,45889,1732246733960] 2024-11-22T03:40:02,501 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,45889,1732246733960 already deleted, retry=false 2024-11-22T03:40:02,502 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,45889,1732246733960 expired; onlineServers=0 2024-11-22T03:40:02,502 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,38087,1732246733790' ***** 2024-11-22T03:40:02,502 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:40:02,502 INFO [M:0;b458937b0f5f:38087 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:40:02,502 INFO [M:0;b458937b0f5f:38087 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:40:02,502 DEBUG [M:0;b458937b0f5f:38087 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:40:02,502 DEBUG [M:0;b458937b0f5f:38087 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:40:02,502 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:40:02,502 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246734292 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246734292,5,FailOnTimeoutGroup] 2024-11-22T03:40:02,502 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246734293 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246734293,5,FailOnTimeoutGroup] 2024-11-22T03:40:02,502 INFO [M:0;b458937b0f5f:38087 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:40:02,502 INFO [M:0;b458937b0f5f:38087 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:40:02,502 DEBUG [M:0;b458937b0f5f:38087 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:40:02,503 INFO [M:0;b458937b0f5f:38087 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:40:02,503 INFO [M:0;b458937b0f5f:38087 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:40:02,503 INFO [M:0;b458937b0f5f:38087 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:40:02,503 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:40:02,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:40:02,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:02,512 DEBUG [M:0;b458937b0f5f:38087 {}] zookeeper.ZKUtil(347): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:40:02,512 WARN [M:0;b458937b0f5f:38087 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:40:02,513 INFO [M:0;b458937b0f5f:38087 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/.lastflushedseqids 2024-11-22T03:40:02,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741882_1058 (size=228) 2024-11-22T03:40:02,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741882_1058 (size=228) 2024-11-22T03:40:02,522 INFO [M:0;b458937b0f5f:38087 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:40:02,522 INFO [M:0;b458937b0f5f:38087 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:40:02,523 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:40:02,523 INFO [M:0;b458937b0f5f:38087 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:02,523 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:02,523 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:40:02,523 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:02,523 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-22T03:40:02,538 DEBUG [M:0;b458937b0f5f:38087 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8abee5fadef40a3aea223d8e90eda6a is 82, key is hbase:meta,,1/info:regioninfo/1732246734991/Put/seqid=0 2024-11-22T03:40:02,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741883_1059 (size=5672) 2024-11-22T03:40:02,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741883_1059 (size=5672) 2024-11-22T03:40:02,543 INFO [M:0;b458937b0f5f:38087 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8abee5fadef40a3aea223d8e90eda6a 2024-11-22T03:40:02,560 DEBUG [M:0;b458937b0f5f:38087 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335ca156c072495fb5fd37f2380e3899 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732246735483/Put/seqid=0 2024-11-22T03:40:02,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741884_1060 (size=7090) 2024-11-22T03:40:02,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741884_1060 (size=7090) 2024-11-22T03:40:02,564 INFO [M:0;b458937b0f5f:38087 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335ca156c072495fb5fd37f2380e3899 2024-11-22T03:40:02,569 INFO [M:0;b458937b0f5f:38087 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 335ca156c072495fb5fd37f2380e3899 2024-11-22T03:40:02,582 DEBUG [M:0;b458937b0f5f:38087 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/423e40b858ff4666a6a0d1c53e1526b9 is 69, key is b458937b0f5f,45889,1732246733960/rs:state/1732246734325/Put/seqid=0 
2024-11-22T03:40:02,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741885_1061 (size=5156) 2024-11-22T03:40:02,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741885_1061 (size=5156) 2024-11-22T03:40:02,587 INFO [M:0;b458937b0f5f:38087 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/423e40b858ff4666a6a0d1c53e1526b9 2024-11-22T03:40:02,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:02,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45889-0x101609f7f770001, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:02,591 INFO [RS:0;b458937b0f5f:45889 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:40:02,591 INFO [RS:0;b458937b0f5f:45889 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,45889,1732246733960; zookeeper connection closed. 2024-11-22T03:40:02,591 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2bff8103 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2bff8103 2024-11-22T03:40:02,592 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:40:02,607 DEBUG [M:0;b458937b0f5f:38087 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8742f7f1acdc42159c3897bac42f1089 is 52, key is load_balancer_on/state:d/1732246735102/Put/seqid=0 2024-11-22T03:40:02,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741886_1062 (size=5056) 2024-11-22T03:40:02,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741886_1062 (size=5056) 2024-11-22T03:40:02,612 INFO [M:0;b458937b0f5f:38087 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8742f7f1acdc42159c3897bac42f1089 2024-11-22T03:40:02,616 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8abee5fadef40a3aea223d8e90eda6a as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b8abee5fadef40a3aea223d8e90eda6a 2024-11-22T03:40:02,620 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b8abee5fadef40a3aea223d8e90eda6a, entries=8, sequenceid=125, filesize=5.5 K 2024-11-22T03:40:02,621 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335ca156c072495fb5fd37f2380e3899 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/335ca156c072495fb5fd37f2380e3899 2024-11-22T03:40:02,625 INFO [M:0;b458937b0f5f:38087 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 335ca156c072495fb5fd37f2380e3899 2024-11-22T03:40:02,625 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/335ca156c072495fb5fd37f2380e3899, entries=13, sequenceid=125, filesize=6.9 K 2024-11-22T03:40:02,626 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/423e40b858ff4666a6a0d1c53e1526b9 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/423e40b858ff4666a6a0d1c53e1526b9 2024-11-22T03:40:02,630 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/423e40b858ff4666a6a0d1c53e1526b9, entries=1, sequenceid=125, filesize=5.0 K 2024-11-22T03:40:02,631 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8742f7f1acdc42159c3897bac42f1089 as hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8742f7f1acdc42159c3897bac42f1089 2024-11-22T03:40:02,635 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/6f368fb6-2551-1119-611d-9417550c40bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8742f7f1acdc42159c3897bac42f1089, entries=1, sequenceid=125, filesize=4.9 K 2024-11-22T03:40:02,636 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=125, compaction requested=false 2024-11-22T03:40:02,638 INFO [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:40:02,638 DEBUG [M:0;b458937b0f5f:38087 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246802522Disabling compacts and flushes for region at 1732246802522Disabling writes for close at 1732246802523 (+1 ms)Obtaining lock to block concurrent updates at 1732246802523Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246802523Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732246802523Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732246802524 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246802524Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246802538 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246802538Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246802547 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246802559 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246802559Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246802569 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246802581 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246802581Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246802590 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246802606 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246802606Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43dc9182: reopening flushed file at 1732246802616 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19fa305e: reopening flushed file at 1732246802621 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d4e8580: reopening flushed file at 1732246802625 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ea5b349: reopening flushed file at 1732246802630 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=125, compaction requested=false at 1732246802636 (+6 ms)Writing region close event to WAL at 1732246802637 (+1 ms)Closed at 1732246802637 2024-11-22T03:40:02,638 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,638 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,638 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,638 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,638 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:02,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45927 is added to blk_1073741830_1006 (size=61320) 2024-11-22T03:40:02,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741830_1006 (size=61320) 2024-11-22T03:40:02,641 INFO [M:0;b458937b0f5f:38087 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T03:40:02,641 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:40:02,641 INFO [M:0;b458937b0f5f:38087 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38087 2024-11-22T03:40:02,641 INFO [M:0;b458937b0f5f:38087 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:40:02,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:02,749 INFO [M:0;b458937b0f5f:38087 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:40:02,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38087-0x101609f7f770000, quorum=127.0.0.1:62408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:02,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ed7c159{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:40:02,752 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b675707{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:40:02,752 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:40:02,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@661bea50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:40:02,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13fc1909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir/,STOPPED} 2024-11-22T03:40:02,753 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:40:02,753 WARN [BP-575432354-172.17.0.3-1732246731445 heartbeating to localhost/127.0.0.1:41491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:40:02,753 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:40:02,753 WARN [BP-575432354-172.17.0.3-1732246731445 heartbeating to localhost/127.0.0.1:41491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-575432354-172.17.0.3-1732246731445 (Datanode Uuid 34b1f1ad-42a1-494f-b4e2-d9c5d87684e7) service to localhost/127.0.0.1:41491 2024-11-22T03:40:02,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data3/current/BP-575432354-172.17.0.3-1732246731445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:02,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data4/current/BP-575432354-172.17.0.3-1732246731445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:02,754 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:40:02,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e22ffa9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:40:02,760 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b5eb244{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:40:02,760 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:40:02,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b569bd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:40:02,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58b83575{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir/,STOPPED} 2024-11-22T03:40:02,762 WARN [BP-575432354-172.17.0.3-1732246731445 heartbeating to localhost/127.0.0.1:41491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:40:02,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:40:02,762 WARN [BP-575432354-172.17.0.3-1732246731445 heartbeating to localhost/127.0.0.1:41491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-575432354-172.17.0.3-1732246731445 (Datanode Uuid cb1230d8-75e7-4a7b-88f4-58674992cc6c) service to localhost/127.0.0.1:41491 2024-11-22T03:40:02,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:40:02,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data1/current/BP-575432354-172.17.0.3-1732246731445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:02,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/cluster_477d7af3-8b32-623c-a518-613b450cb07a/data/data2/current/BP-575432354-172.17.0.3-1732246731445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:02,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:40:02,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@af49f38{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:40:02,768 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@240b8237{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:40:02,768 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:40:02,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c149881{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:40:02,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74b22f54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir/,STOPPED} 2024-11-22T03:40:02,774 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:40:02,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:40:02,809 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 207) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41491 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:41491 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:41491 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41491 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:41491 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=161 (was 183), ProcessCount=11 (was 11), AvailableMemoryMB=6671 (was 5957) - AvailableMemoryMB LEAK? - 2024-11-22T03:40:02,817 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=161, ProcessCount=11, AvailableMemoryMB=6671 2024-11-22T03:40:02,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:40:02,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.log.dir so I do NOT create it in target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f 2024-11-22T03:40:02,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ffad9c-80ac-facf-2e9a-43cadb99a329/hadoop.tmp.dir so I do NOT create it in target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f 2024-11-22T03:40:02,817 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a, deleteOnExit=true 2024-11-22T03:40:02,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/test.cache.data in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/mapreduce.cluster.local.dir in 
system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:40:02,818 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:40:02,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:40:02,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:40:02,830 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:40:03,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:03,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:03,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:40:03,339 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:40:03,340 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:40:03,340 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:40:03,340 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:40:03,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:40:03,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cce16d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:40:03,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1c6682{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:40:03,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b3b93a7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/java.io.tmpdir/jetty-localhost-37719-hadoop-hdfs-3_4_1-tests_jar-_-any-11846615246139309142/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:40:03,434 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e8c567b{HTTP/1.1, (http/1.1)}{localhost:37719} 2024-11-22T03:40:03,434 INFO [Time-limited test {}] server.Server(415): Started @322852ms 2024-11-22T03:40:03,445 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:40:03,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:40:03,666 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:40:03,667 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:40:03,667 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:40:03,667 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:40:03,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@751df37f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:40:03,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76ba698c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:40:03,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78d8a6b1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/java.io.tmpdir/jetty-localhost-39039-hadoop-hdfs-3_4_1-tests_jar-_-any-13602289934675116485/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:40:03,759 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5794b4b6{HTTP/1.1, (http/1.1)}{localhost:39039} 2024-11-22T03:40:03,759 INFO [Time-limited test {}] server.Server(415): Started @323176ms 2024-11-22T03:40:03,760 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:40:03,784 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:40:03,786 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:40:03,787 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:40:03,787 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:40:03,787 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:40:03,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11436c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:40:03,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62eaa2bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:40:03,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ac6b403{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/java.io.tmpdir/jetty-localhost-34639-hadoop-hdfs-3_4_1-tests_jar-_-any-7578893868624553697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:40:03,882 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c95cc3d{HTTP/1.1, (http/1.1)}{localhost:34639} 2024-11-22T03:40:03,882 INFO [Time-limited test {}] server.Server(415): Started @323300ms 2024-11-22T03:40:03,883 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:40:04,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:04,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:40:04,877 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data1/current/BP-620697003-172.17.0.3-1732246802833/current, will proceed with Du for space computation calculation, 2024-11-22T03:40:04,877 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data2/current/BP-620697003-172.17.0.3-1732246802833/current, will proceed with Du for space computation calculation, 2024-11-22T03:40:04,896 WARN [Thread-2473 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:40:04,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb2f8f6815a11591 with lease ID 0x2a64cfc6fa33ae8: Processing first storage report for DS-a0afcbdd-7a6f-4fcd-86c6-6d542867941d from datanode DatanodeRegistration(127.0.0.1:43511, datanodeUuid=96ab1abc-1a8b-4ae6-93f4-4f06f1d63aa9, infoPort=46501, infoSecurePort=0, ipcPort=34813, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833) 2024-11-22T03:40:04,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb2f8f6815a11591 with lease ID 0x2a64cfc6fa33ae8: from storage DS-a0afcbdd-7a6f-4fcd-86c6-6d542867941d node DatanodeRegistration(127.0.0.1:43511, datanodeUuid=96ab1abc-1a8b-4ae6-93f4-4f06f1d63aa9, infoPort=46501, infoSecurePort=0, ipcPort=34813, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:40:04,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb2f8f6815a11591 with lease ID 0x2a64cfc6fa33ae8: Processing first storage report for DS-5ac09d06-c073-497f-819a-511b4dd351b5 from datanode DatanodeRegistration(127.0.0.1:43511, datanodeUuid=96ab1abc-1a8b-4ae6-93f4-4f06f1d63aa9, infoPort=46501, infoSecurePort=0, ipcPort=34813, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833) 2024-11-22T03:40:04,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb2f8f6815a11591 with lease ID 0x2a64cfc6fa33ae8: from storage DS-5ac09d06-c073-497f-819a-511b4dd351b5 node DatanodeRegistration(127.0.0.1:43511, datanodeUuid=96ab1abc-1a8b-4ae6-93f4-4f06f1d63aa9, infoPort=46501, infoSecurePort=0, ipcPort=34813, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:40:05,033 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data4/current/BP-620697003-172.17.0.3-1732246802833/current, will proceed with Du for space computation calculation, 2024-11-22T03:40:05,033 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data3/current/BP-620697003-172.17.0.3-1732246802833/current, will proceed with Du for space computation calculation, 2024-11-22T03:40:05,056 WARN [Thread-2496 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:40:05,058 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c3c5262f82652ef with lease ID 0x2a64cfc6fa33ae9: Processing first storage report for DS-9f22aa6d-36cf-4b18-a96a-a31267f9b93f from datanode DatanodeRegistration(127.0.0.1:40483, datanodeUuid=f80a2bd4-a694-48b5-96db-7fe3ad150289, infoPort=45605, infoSecurePort=0, ipcPort=35923, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833) 2024-11-22T03:40:05,058 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c3c5262f82652ef with lease ID 0x2a64cfc6fa33ae9: from storage DS-9f22aa6d-36cf-4b18-a96a-a31267f9b93f node DatanodeRegistration(127.0.0.1:40483, datanodeUuid=f80a2bd4-a694-48b5-96db-7fe3ad150289, infoPort=45605, infoSecurePort=0, ipcPort=35923, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:40:05,058 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c3c5262f82652ef with lease ID 0x2a64cfc6fa33ae9: Processing first storage report for DS-ea696f0e-b288-463b-a08a-75ead7d19e26 from datanode DatanodeRegistration(127.0.0.1:40483, datanodeUuid=f80a2bd4-a694-48b5-96db-7fe3ad150289, infoPort=45605, infoSecurePort=0, ipcPort=35923, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833) 2024-11-22T03:40:05,058 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c3c5262f82652ef with lease ID 0x2a64cfc6fa33ae9: from storage DS-ea696f0e-b288-463b-a08a-75ead7d19e26 node DatanodeRegistration(127.0.0.1:40483, datanodeUuid=f80a2bd4-a694-48b5-96db-7fe3ad150289, infoPort=45605, infoSecurePort=0, ipcPort=35923, storageInfo=lv=-57;cid=testClusterID;nsid=1673069801;c=1732246802833), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:40:05,111 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f 2024-11-22T03:40:05,116 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/zookeeper_0, clientPort=59590, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:40:05,118 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59590 2024-11-22T03:40:05,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:40:05,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:40:05,133 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67 with version=8 2024-11-22T03:40:05,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35981/user/jenkins/test-data/01d944eb-9ed8-4e99-3bd0-df5fc17e48d4/hbase-staging 2024-11-22T03:40:05,134 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:40:05,135 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:40:05,136 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45127 2024-11-22T03:40:05,137 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45127 connecting to ZooKeeper ensemble=127.0.0.1:59590 2024-11-22T03:40:05,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:451270x0, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-22T03:40:05,187 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45127-0x10160a096260000 connected 2024-11-22T03:40:05,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:05,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:05,271 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,278 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:40:05,278 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67, hbase.cluster.distributed=false 2024-11-22T03:40:05,280 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:40:05,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45127 2024-11-22T03:40:05,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45127 2024-11-22T03:40:05,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45127 2024-11-22T03:40:05,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45127 2024-11-22T03:40:05,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45127 2024-11-22T03:40:05,295 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b458937b0f5f:0 server-side Connection retries=45 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:40:05,295 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:40:05,296 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40841 2024-11-22T03:40:05,296 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40841 connecting to ZooKeeper ensemble=127.0.0.1:59590 2024-11-22T03:40:05,297 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,298 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408410x0, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:40:05,312 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40841-0x10160a096260001 connected 2024-11-22T03:40:05,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:40:05,313 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:40:05,313 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:40:05,314 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:40:05,315 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:40:05,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40841 2024-11-22T03:40:05,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40841 2024-11-22T03:40:05,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40841 2024-11-22T03:40:05,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40841 2024-11-22T03:40:05,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40841 2024-11-22T03:40:05,329 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b458937b0f5f:45127 2024-11-22T03:40:05,329 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:40:05,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:40:05,342 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:40:05,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,354 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:40:05,355 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b458937b0f5f,45127,1732246805134 from backup master directory 2024-11-22T03:40:05,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:40:05,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:40:05,365 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:40:05,365 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,369 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/hbase.id] with ID: 5069ea58-dd25-422d-a9b3-dcf87a558c3a 2024-11-22T03:40:05,369 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/.tmp/hbase.id 2024-11-22T03:40:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:40:05,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:40:05,376 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/.tmp/hbase.id]:[hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/hbase.id] 2024-11-22T03:40:05,389 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:05,390 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:40:05,391 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-22T03:40:05,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:40:05,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:40:05,402 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:40:05,403 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:40:05,403 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:40:05,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:40:05,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:40:05,410 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store 2024-11-22T03:40:05,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:40:05,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:40:05,421 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:40:05,421 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:40:05,421 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:05,421 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:05,421 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:40:05,421 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:05,421 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:40:05,421 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246805421Disabling compacts and flushes for region at 1732246805421Disabling writes for close at 1732246805421Writing region close event to WAL at 1732246805421Closed at 1732246805421 2024-11-22T03:40:05,422 WARN [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/.initializing 2024-11-22T03:40:05,422 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/WALs/b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,424 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C45127%2C1732246805134, suffix=, logDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/WALs/b458937b0f5f,45127,1732246805134, archiveDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/oldWALs, maxLogs=10 2024-11-22T03:40:05,424 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C45127%2C1732246805134.1732246805424 2024-11-22T03:40:05,429 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/WALs/b458937b0f5f,45127,1732246805134/b458937b0f5f%2C45127%2C1732246805134.1732246805424 2024-11-22T03:40:05,432 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46501:46501),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-22T03:40:05,434 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:40:05,435 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:40:05,435 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,435 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:40:05,437 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:05,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:40:05,439 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:40:05,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,440 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:40:05,440 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:40:05,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,442 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:40:05,442 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,442 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:40:05,442 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,443 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,443 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,444 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,444 DEBUG [master/b458937b0f5f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,445 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:40:05,446 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:40:05,449 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:40:05,449 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733264, jitterRate=-0.06760716438293457}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:40:05,450 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732246805435Initializing all the Stores at 1732246805436 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246805436Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246805436Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246805436Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246805436Cleaning up temporary data from old regions at 1732246805444 (+8 ms)Region opened successfully at 1732246805450 (+6 ms) 2024-11-22T03:40:05,452 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:40:05,455 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@648fd28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:40:05,456 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:40:05,457 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:40:05,457 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:40:05,457 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:40:05,458 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:40:05,458 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:40:05,458 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:40:05,462 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:40:05,462 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:40:05,470 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:40:05,470 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:40:05,471 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:40:05,480 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:40:05,481 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:40:05,482 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:40:05,491 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:40:05,492 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:40:05,501 DEBUG 
[master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:40:05,506 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:40:05,512 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:40:05,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:40:05,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:40:05,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,523 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b458937b0f5f,45127,1732246805134, sessionid=0x10160a096260000, setting cluster-up flag (Was=false) 2024-11-22T03:40:05,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,575 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:40:05,577 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:05,628 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:40:05,632 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b458937b0f5f,45127,1732246805134 2024-11-22T03:40:05,635 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:40:05,638 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:40:05,639 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:40:05,639 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:40:05,639 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b458937b0f5f,45127,1732246805134 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b458937b0f5f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b458937b0f5f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,643 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:40:05,644 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b458937b0f5f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732246835645 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:40:05,645 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:40:05,645 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:40:05,645 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:40:05,646 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T03:40:05,646 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:40:05,646 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,646 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:40:05,646 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:40:05,646 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:40:05,647 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:40:05,647 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:40:05,647 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246805647,5,FailOnTimeoutGroup] 2024-11-22T03:40:05,647 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246805647,5,FailOnTimeoutGroup] 2024-11-22T03:40:05,647 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,647 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:40:05,648 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,648 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:40:05,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:40:05,653 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:40:05,653 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67 2024-11-22T03:40:05,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:40:05,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:40:05,660 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:40:05,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:40:05,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:40:05,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:05,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:40:05,664 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:40:05,664 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:05,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:40:05,665 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:40:05,665 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:05,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:40:05,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:40:05,667 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:05,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:05,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:40:05,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740 2024-11-22T03:40:05,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740 2024-11-22T03:40:05,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:40:05,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:40:05,670 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:40:05,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:40:05,674 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:40:05,675 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845701, jitterRate=0.07536491751670837}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:40:05,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732246805660Initializing all the Stores at 1732246805661 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246805661Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246805661Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246805661Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246805661Cleaning up temporary data from old regions at 1732246805670 (+9 ms)Region opened successfully at 1732246805676 (+6 ms) 2024-11-22T03:40:05,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:40:05,676 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:40:05,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:40:05,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:40:05,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:40:05,676 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:40:05,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246805676Disabling compacts and flushes for region at 
1732246805676Disabling writes for close at 1732246805676Writing region close event to WAL at 1732246805676Closed at 1732246805676 2024-11-22T03:40:05,678 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:40:05,678 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:40:05,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:40:05,679 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:40:05,681 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:40:05,722 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(746): ClusterId : 5069ea58-dd25-422d-a9b3-dcf87a558c3a 2024-11-22T03:40:05,722 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:40:05,735 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:40:05,735 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:40:05,744 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:40:05,745 DEBUG [RS:0;b458937b0f5f:40841 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12f2167c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b458937b0f5f/172.17.0.3:0 2024-11-22T03:40:05,755 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b458937b0f5f:40841 2024-11-22T03:40:05,755 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:40:05,755 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:40:05,755 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:40:05,756 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(2659): reportForDuty to master=b458937b0f5f,45127,1732246805134 with port=40841, startcode=1732246805294 2024-11-22T03:40:05,756 DEBUG [RS:0;b458937b0f5f:40841 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:40:05,758 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57287, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:40:05,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45127 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b458937b0f5f,40841,1732246805294 2024-11-22T03:40:05,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45127 {}] master.ServerManager(517): Registering regionserver=b458937b0f5f,40841,1732246805294 2024-11-22T03:40:05,760 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67 2024-11-22T03:40:05,760 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45665 2024-11-22T03:40:05,760 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:40:05,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:40:05,765 DEBUG [RS:0;b458937b0f5f:40841 {}] zookeeper.ZKUtil(111): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b458937b0f5f,40841,1732246805294 2024-11-22T03:40:05,765 WARN [RS:0;b458937b0f5f:40841 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:40:05,765 INFO [RS:0;b458937b0f5f:40841 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:40:05,765 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/b458937b0f5f,40841,1732246805294 2024-11-22T03:40:05,765 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b458937b0f5f,40841,1732246805294] 2024-11-22T03:40:05,769 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:40:05,770 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:40:05,770 INFO [RS:0;b458937b0f5f:40841 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:40:05,771 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:40:05,771 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:40:05,771 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:40:05,772 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b458937b0f5f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b458937b0f5f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:40:05,772 DEBUG [RS:0;b458937b0f5f:40841 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b458937b0f5f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:40:05,773 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T03:40:05,773 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,773 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,773 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,773 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,773 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40841,1732246805294-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:40:05,790 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:40:05,791 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,40841,1732246805294-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,791 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,791 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.Replication(171): b458937b0f5f,40841,1732246805294 started 2024-11-22T03:40:05,802 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:05,802 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1482): Serving as b458937b0f5f,40841,1732246805294, RpcServer on b458937b0f5f/172.17.0.3:40841, sessionid=0x10160a096260001 2024-11-22T03:40:05,802 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:40:05,802 DEBUG [RS:0;b458937b0f5f:40841 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b458937b0f5f,40841,1732246805294 2024-11-22T03:40:05,802 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,40841,1732246805294' 2024-11-22T03:40:05,802 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:40:05,803 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:40:05,803 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:40:05,803 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:40:05,803 DEBUG [RS:0;b458937b0f5f:40841 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b458937b0f5f,40841,1732246805294 2024-11-22T03:40:05,803 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b458937b0f5f,40841,1732246805294' 2024-11-22T03:40:05,803 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:40:05,804 DEBUG 
[RS:0;b458937b0f5f:40841 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:40:05,804 DEBUG [RS:0;b458937b0f5f:40841 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:40:05,804 INFO [RS:0;b458937b0f5f:40841 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:40:05,804 INFO [RS:0;b458937b0f5f:40841 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:40:05,831 WARN [b458937b0f5f:45127 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:40:05,906 INFO [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C40841%2C1732246805294, suffix=, logDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/b458937b0f5f,40841,1732246805294, archiveDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs, maxLogs=32 2024-11-22T03:40:05,907 INFO [RS:0;b458937b0f5f:40841 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C40841%2C1732246805294.1732246805906 2024-11-22T03:40:05,912 INFO [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/b458937b0f5f,40841,1732246805294/b458937b0f5f%2C40841%2C1732246805294.1732246805906 2024-11-22T03:40:05,916 DEBUG [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46501:46501),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-22T03:40:06,081 DEBUG [b458937b0f5f:45127 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:40:06,082 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b458937b0f5f,40841,1732246805294 2024-11-22T03:40:06,084 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,40841,1732246805294, state=OPENING 2024-11-22T03:40:06,165 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:40:06,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:06,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:06,177 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:40:06,177 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:40:06,177 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:40:06,177 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,40841,1732246805294}] 2024-11-22T03:40:06,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,39625,1732246592490/b458937b0f5f%2C39625%2C1732246592490.meta.1732246593580.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:06,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33231/user/jenkins/test-data/cad9eddb-78fe-7fe3-804b-e04b578188ce/WALs/b458937b0f5f,43975,1732246593751/b458937b0f5f%2C43975%2C1732246593751.1732246593983 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:40:06,333 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:40:06,334 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:40:06,337 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:40:06,337 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:40:06,339 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b458937b0f5f%2C40841%2C1732246805294.meta, suffix=.meta, logDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/b458937b0f5f,40841,1732246805294, archiveDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs, maxLogs=32 2024-11-22T03:40:06,340 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b458937b0f5f%2C40841%2C1732246805294.meta.1732246806339.meta 2024-11-22T03:40:06,349 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/b458937b0f5f,40841,1732246805294/b458937b0f5f%2C40841%2C1732246805294.meta.1732246806339.meta 2024-11-22T03:40:06,355 DEBUG 
[RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46501:46501),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-22T03:40:06,360 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:40:06,361 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:40:06,361 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:40:06,361 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T03:40:06,361 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:40:06,361 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:40:06,361 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:40:06,361 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:40:06,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:40:06,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:40:06,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:06,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:06,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:40:06,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:40:06,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:06,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:06,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:40:06,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:40:06,366 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:06,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:06,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:40:06,367 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:40:06,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:40:06,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:40:06,368 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:40:06,369 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740 2024-11-22T03:40:06,370 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740 2024-11-22T03:40:06,371 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:40:06,371 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:40:06,371 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T03:40:06,372 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:40:06,373 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728289, jitterRate=-0.07393279671669006}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:40:06,373 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:40:06,374 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732246806361Writing region info on filesystem at 1732246806361Initializing all the Stores at 1732246806362 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246806362Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246806363 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732246806363Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732246806363Cleaning up temporary data from old regions at 1732246806371 (+8 ms)Running coprocessor post-open hooks at 1732246806373 (+2 ms)Region opened successfully at 1732246806374 (+1 ms) 2024-11-22T03:40:06,374 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732246806332 2024-11-22T03:40:06,376 DEBUG [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:40:06,376 INFO [RS_OPEN_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:40:06,377 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=b458937b0f5f,40841,1732246805294 2024-11-22T03:40:06,378 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b458937b0f5f,40841,1732246805294, state=OPEN 2024-11-22T03:40:06,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:40:06,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:40:06,412 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b458937b0f5f,40841,1732246805294 2024-11-22T03:40:06,412 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:40:06,412 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:40:06,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:40:06,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b458937b0f5f,40841,1732246805294 in 235 msec 2024-11-22T03:40:06,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:40:06,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 738 msec 2024-11-22T03:40:06,420 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:40:06,420 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:40:06,422 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:40:06,422 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,40841,1732246805294, seqNum=-1] 2024-11-22T03:40:06,422 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:40:06,424 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45173, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:40:06,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 794 msec 2024-11-22T03:40:06,431 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732246806431, completionTime=-1 2024-11-22T03:40:06,431 INFO 
[master/b458937b0f5f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:40:06,431 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732246866434 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732246926434 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45127,1732246805134-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45127,1732246805134-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45127,1732246805134-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b458937b0f5f:45127, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,434 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,435 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,436 DEBUG [master/b458937b0f5f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.074sec 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45127,1732246805134-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:40:06,439 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45127,1732246805134-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:40:06,441 DEBUG [master/b458937b0f5f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:40:06,442 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:40:06,442 INFO [master/b458937b0f5f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b458937b0f5f,45127,1732246805134-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:40:06,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eaedea1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:40:06,522 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b458937b0f5f,45127,-1 for getting cluster id 2024-11-22T03:40:06,523 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:40:06,525 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5069ea58-dd25-422d-a9b3-dcf87a558c3a' 2024-11-22T03:40:06,526 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:40:06,526 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5069ea58-dd25-422d-a9b3-dcf87a558c3a" 2024-11-22T03:40:06,527 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@162e873b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:40:06,527 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b458937b0f5f,45127,-1] 2024-11-22T03:40:06,527 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:40:06,528 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:06,529 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37960, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:40:06,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ea3d1bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:40:06,531 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:40:06,532 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b458937b0f5f,40841,1732246805294, seqNum=-1] 2024-11-22T03:40:06,532 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:40:06,533 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:40:06,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b458937b0f5f,45127,1732246805134 2024-11-22T03:40:06,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:40:06,538 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:40:06,538 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:40:06,540 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs, maxLogs=32 2024-11-22T03:40:06,540 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732246806540 2024-11-22T03:40:06,544 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/test.com,8080,1/test.com%2C8080%2C1.1732246806540 2024-11-22T03:40:06,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46501:46501),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-22T03:40:06,546 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732246806546 2024-11-22T03:40:06,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,553 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,553 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,553 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,553 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/test.com,8080,1/test.com%2C8080%2C1.1732246806540 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/test.com,8080,1/test.com%2C8080%2C1.1732246806546 2024-11-22T03:40:06,558 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45605:45605),(127.0.0.1/127.0.0.1:46501:46501)] 2024-11-22T03:40:06,558 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/test.com,8080,1/test.com%2C8080%2C1.1732246806540 is not closed yet, will try archiving it next time 2024-11-22T03:40:06,558 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,558 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,558 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,558 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741835_1011 (size=93) 2024-11-22T03:40:06,558 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741835_1011 (size=93) 2024-11-22T03:40:06,559 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/WALs/test.com,8080,1/test.com%2C8080%2C1.1732246806540 to hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs/test.com%2C8080%2C1.1732246806540 2024-11-22T03:40:06,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741836_1012 (size=93) 2024-11-22T03:40:06,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741836_1012 (size=93) 2024-11-22T03:40:06,562 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs 2024-11-22T03:40:06,562 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732246806546) 2024-11-22T03:40:06,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:40:06,562 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:40:06,562 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:40:06,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:06,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:06,562 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T03:40:06,562 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:40:06,562 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2097249439, stopped=false 2024-11-22T03:40:06,562 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b458937b0f5f,45127,1732246805134 2024-11-22T03:40:06,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:40:06,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:40:06,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:06,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:06,583 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:40:06,584 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:40:06,584 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:40:06,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:06,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:40:06,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:40:06,584 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b458937b0f5f,40841,1732246805294' ***** 2024-11-22T03:40:06,584 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:40:06,584 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:40:06,584 INFO [RS:0;b458937b0f5f:40841 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:40:06,584 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:40:06,584 INFO [RS:0;b458937b0f5f:40841 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:40:06,584 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(959): stopping server b458937b0f5f,40841,1732246805294 2024-11-22T03:40:06,584 INFO [RS:0;b458937b0f5f:40841 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:40:06,584 INFO [RS:0;b458937b0f5f:40841 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b458937b0f5f:40841. 
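Both call stacks above bottom out in the same place: the JUnit 4 after-hook (RunAfters) invokes AbstractTestLogRolling.tearDown, which calls HBaseTestingUtil.shutdownMiniCluster; that in turn closes the shared async connections, asks the master to shut the cluster down, and stops the region server. A minimal sketch of that teardown wiring, assuming the usual mini-cluster test pattern (the class and field names here are illustrative, not the real test's), is:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterTearDownSketch {

  // Shared test harness; the real AbstractTestLogRolling keeps an equivalent utility instance.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts HDFS, ZooKeeper, a master and a region server, as in the logged run.
    testUtil.startMiniCluster();
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // Test body elided; the log above comes from the teardown phase.
  }

  @After
  public void tearDown() throws Exception {
    // Emits the "Shutting down minicluster" ... "Minicluster is down" sequence seen in the log.
    testUtil.shutdownMiniCluster();
  }
}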
2024-11-22T03:40:06,584 DEBUG [RS:0;b458937b0f5f:40841 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:40:06,585 DEBUG [RS:0;b458937b0f5f:40841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:06,585 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:40:06,585 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:40:06,585 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T03:40:06,585 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:40:06,585 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T03:40:06,585 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:40:06,585 DEBUG [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T03:40:06,585 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:40:06,585 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:40:06,585 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:40:06,585 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:40:06,585 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:40:06,585 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T03:40:06,599 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/.tmp/ns/1e94f5ba9f9b46d4be49de5611aa356a is 43, key is default/ns:d/1732246806425/Put/seqid=0 2024-11-22T03:40:06,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741837_1013 (size=5153) 2024-11-22T03:40:06,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741837_1013 (size=5153) 2024-11-22T03:40:06,604 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/.tmp/ns/1e94f5ba9f9b46d4be49de5611aa356a 2024-11-22T03:40:06,610 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/.tmp/ns/1e94f5ba9f9b46d4be49de5611aa356a as hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/ns/1e94f5ba9f9b46d4be49de5611aa356a 2024-11-22T03:40:06,614 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/ns/1e94f5ba9f9b46d4be49de5611aa356a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T03:40:06,615 INFO 
[RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-22T03:40:06,615 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:40:06,618 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T03:40:06,619 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:40:06,619 INFO [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:40:06,619 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732246806585Running coprocessor pre-close hooks at 1732246806585Disabling compacts and flushes for region at 1732246806585Disabling writes for close at 1732246806585Obtaining lock to block concurrent updates at 1732246806585Preparing flush snapshotting stores in 1588230740 at 1732246806585Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732246806586 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732246806586Flushing 1588230740/ns: creating writer at 1732246806586Flushing 1588230740/ns: appending metadata at 1732246806599 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732246806599Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5262fd5f: reopening flushed file at 1732246806609 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1732246806615 (+6 ms)Writing region close event to WAL at 1732246806616 (+1 ms)Running coprocessor post-close hooks at 1732246806619 (+3 ms)Closed at 1732246806619 2024-11-22T03:40:06,619 DEBUG [RS_CLOSE_META-regionserver/b458937b0f5f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:40:06,780 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:40:06,780 INFO [regionserver/b458937b0f5f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:40:06,785 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(976): stopping server b458937b0f5f,40841,1732246805294; all regions closed. 
2024-11-22T03:40:06,787 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,787 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,787 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,788 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:40:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:40:06,798 DEBUG [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs 2024-11-22T03:40:06,798 INFO [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C40841%2C1732246805294.meta:.meta(num 1732246806339) 2024-11-22T03:40:06,798 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,798 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,798 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,798 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,799 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:40:06,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:40:06,803 DEBUG [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/oldWALs 2024-11-22T03:40:06,803 INFO [RS:0;b458937b0f5f:40841 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b458937b0f5f%2C40841%2C1732246805294:(num 1732246805906) 2024-11-22T03:40:06,803 DEBUG [RS:0;b458937b0f5f:40841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:40:06,804 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:40:06,804 INFO [RS:0;b458937b0f5f:40841 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:40:06,804 INFO [RS:0;b458937b0f5f:40841 {}] hbase.ChoreService(370): Chore service for: regionserver/b458937b0f5f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:40:06,804 INFO [RS:0;b458937b0f5f:40841 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:40:06,804 INFO [regionserver/b458937b0f5f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:40:06,804 INFO [RS:0;b458937b0f5f:40841 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40841 2024-11-22T03:40:06,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b458937b0f5f,40841,1732246805294 2024-11-22T03:40:06,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:40:06,817 INFO [RS:0;b458937b0f5f:40841 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:40:06,828 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b458937b0f5f,40841,1732246805294] 2024-11-22T03:40:06,838 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b458937b0f5f,40841,1732246805294 already deleted, retry=false 2024-11-22T03:40:06,838 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b458937b0f5f,40841,1732246805294 expired; onlineServers=0 2024-11-22T03:40:06,838 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b458937b0f5f,45127,1732246805134' ***** 2024-11-22T03:40:06,838 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:40:06,839 INFO [M:0;b458937b0f5f:45127 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:40:06,839 INFO [M:0;b458937b0f5f:45127 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:40:06,839 DEBUG [M:0;b458937b0f5f:45127 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:40:06,839 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
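The ZooKeeper events above are the cluster-membership mechanism at work: the region server's ephemeral znode under /hbase/rs disappears when its session closes, the master's RegionServerTracker sees the NodeDeleted/NodeChildrenChanged events and treats the server as expired, and because cluster shutdown is already set it proceeds to stop the master as well. A generic ZooKeeper sketch of that ephemeral-node liveness pattern follows (not HBase's tracker code; the quorum address and znode names are copied from the log, everything else is assumed, and parent znodes are taken to exist already):

import java.util.List;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLivenessSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59590", 30_000, event -> { });

    // A server announces itself with an ephemeral node tied to its session;
    // when the session ends (clean shutdown or crash), ZooKeeper deletes the node.
    zk.create("/hbase/rs/b458937b0f5f,40841,1732246805294", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // A tracker watches the parent path; NodeChildrenChanged fires when membership changes.
    Watcher membershipWatcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("membership changed under " + event.getPath());
      }
    };
    List<String> liveServers = zk.getChildren("/hbase/rs", membershipWatcher);
    System.out.println("live region servers: " + liveServers);
  }
}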
2024-11-22T03:40:06,839 DEBUG [M:0;b458937b0f5f:45127 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:40:06,839 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246805647 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.large.0-1732246805647,5,FailOnTimeoutGroup] 2024-11-22T03:40:06,839 DEBUG [master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246805647 {}] cleaner.HFileCleaner(306): Exit Thread[master/b458937b0f5f:0:becomeActiveMaster-HFileCleaner.small.0-1732246805647,5,FailOnTimeoutGroup] 2024-11-22T03:40:06,840 INFO [M:0;b458937b0f5f:45127 {}] hbase.ChoreService(370): Chore service for: master/b458937b0f5f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:40:06,840 INFO [M:0;b458937b0f5f:45127 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:40:06,840 DEBUG [M:0;b458937b0f5f:45127 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:40:06,840 INFO [M:0;b458937b0f5f:45127 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:40:06,840 INFO [M:0;b458937b0f5f:45127 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:40:06,841 INFO [M:0;b458937b0f5f:45127 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:40:06,841 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:40:06,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:40:06,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:40:06,849 DEBUG [M:0;b458937b0f5f:45127 {}] zookeeper.ZKUtil(347): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:40:06,849 WARN [M:0;b458937b0f5f:45127 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:40:06,850 INFO [M:0;b458937b0f5f:45127 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/.lastflushedseqids 2024-11-22T03:40:06,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741838_1014 (size=99) 2024-11-22T03:40:06,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741838_1014 (size=99) 2024-11-22T03:40:06,858 INFO [M:0;b458937b0f5f:45127 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:40:06,859 INFO [M:0;b458937b0f5f:45127 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:40:06,859 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:40:06,859 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:06,859 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:06,859 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:40:06,859 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:40:06,859 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T03:40:06,879 DEBUG [M:0;b458937b0f5f:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/212ea0032f254781a6ceecf7a0f382cb is 82, key is hbase:meta,,1/info:regioninfo/1732246806377/Put/seqid=0 2024-11-22T03:40:06,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741839_1015 (size=5672) 2024-11-22T03:40:06,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741839_1015 (size=5672) 2024-11-22T03:40:06,884 INFO [M:0;b458937b0f5f:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/212ea0032f254781a6ceecf7a0f382cb 2024-11-22T03:40:06,902 DEBUG [M:0;b458937b0f5f:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/62b3c7e621ec45fbace0cd696d0bfe5b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732246806430/Put/seqid=0 2024-11-22T03:40:06,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741840_1016 (size=5275) 2024-11-22T03:40:06,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741840_1016 (size=5275) 2024-11-22T03:40:06,907 INFO [M:0;b458937b0f5f:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/62b3c7e621ec45fbace0cd696d0bfe5b 2024-11-22T03:40:06,924 DEBUG [M:0;b458937b0f5f:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4636943b24c64025bfdbd71bbf676c6e is 69, key is b458937b0f5f,40841,1732246805294/rs:state/1732246805758/Put/seqid=0 2024-11-22T03:40:06,928 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741841_1017 (size=5156) 2024-11-22T03:40:06,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741841_1017 (size=5156) 2024-11-22T03:40:06,928 INFO [RS:0;b458937b0f5f:40841 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:40:06,928 INFO [RS:0;b458937b0f5f:40841 {}] regionserver.HRegionServer(1031): Exiting; stopping=b458937b0f5f,40841,1732246805294; zookeeper connection closed. 2024-11-22T03:40:06,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:06,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40841-0x10160a096260001, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:06,929 INFO [M:0;b458937b0f5f:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4636943b24c64025bfdbd71bbf676c6e 2024-11-22T03:40:06,929 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27e82bbc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27e82bbc 2024-11-22T03:40:06,929 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:40:06,944 DEBUG [M:0;b458937b0f5f:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c35e52b038ca42cb96602e2bd9de24a3 is 52, key is load_balancer_on/state:d/1732246806537/Put/seqid=0 2024-11-22T03:40:06,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741842_1018 (size=5056) 2024-11-22T03:40:06,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741842_1018 (size=5056) 2024-11-22T03:40:06,949 INFO [M:0;b458937b0f5f:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c35e52b038ca42cb96602e2bd9de24a3 2024-11-22T03:40:06,953 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/212ea0032f254781a6ceecf7a0f382cb as hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/212ea0032f254781a6ceecf7a0f382cb 2024-11-22T03:40:06,957 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/212ea0032f254781a6ceecf7a0f382cb, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T03:40:06,958 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/62b3c7e621ec45fbace0cd696d0bfe5b as hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/62b3c7e621ec45fbace0cd696d0bfe5b 2024-11-22T03:40:06,961 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/62b3c7e621ec45fbace0cd696d0bfe5b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T03:40:06,962 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4636943b24c64025bfdbd71bbf676c6e as hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4636943b24c64025bfdbd71bbf676c6e 2024-11-22T03:40:06,966 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4636943b24c64025bfdbd71bbf676c6e, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T03:40:06,966 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c35e52b038ca42cb96602e2bd9de24a3 as hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c35e52b038ca42cb96602e2bd9de24a3 2024-11-22T03:40:06,970 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45665/user/jenkins/test-data/d833dee8-642e-2dfe-1680-1d74637aba67/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c35e52b038ca42cb96602e2bd9de24a3, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T03:40:06,971 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false 2024-11-22T03:40:06,972 INFO [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:40:06,972 DEBUG [M:0;b458937b0f5f:45127 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732246806859Disabling compacts and flushes for region at 1732246806859Disabling writes for close at 1732246806859Obtaining lock to block concurrent updates at 1732246806859Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732246806859Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732246806859Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732246806860 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732246806860Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732246806879 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732246806879Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732246806888 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732246806902 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732246806902Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732246806911 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732246806923 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732246806923Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732246806932 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732246806943 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732246806943Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20313d4b: reopening flushed file at 1732246806952 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c9d911a: reopening flushed file at 1732246806957 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38d89b60: reopening flushed file at 1732246806962 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68cf36d0: reopening flushed file at 1732246806966 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false at 1732246806971 (+5 ms)Writing region close event to WAL at 1732246806972 (+1 ms)Closed at 1732246806972 2024-11-22T03:40:06,973 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,973 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,973 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,973 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,973 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:40:06,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40483 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:40:06,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43511 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:40:06,975 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:40:06,975 INFO [M:0;b458937b0f5f:45127 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:40:06,975 INFO [M:0;b458937b0f5f:45127 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45127 2024-11-22T03:40:06,975 INFO [M:0;b458937b0f5f:45127 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:40:07,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:07,086 INFO [M:0;b458937b0f5f:45127 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:40:07,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x10160a096260000, quorum=127.0.0.1:59590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:40:07,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ac6b403{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:40:07,088 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c95cc3d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:40:07,088 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:40:07,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62eaa2bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:40:07,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11436c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir/,STOPPED} 2024-11-22T03:40:07,090 WARN [BP-620697003-172.17.0.3-1732246802833 heartbeating to localhost/127.0.0.1:45665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:40:07,090 WARN [BP-620697003-172.17.0.3-1732246802833 heartbeating to localhost/127.0.0.1:45665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-620697003-172.17.0.3-1732246802833 (Datanode Uuid f80a2bd4-a694-48b5-96db-7fe3ad150289) service to localhost/127.0.0.1:45665 2024-11-22T03:40:07,090 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:40:07,090 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:40:07,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data3/current/BP-620697003-172.17.0.3-1732246802833 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:07,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data4/current/BP-620697003-172.17.0.3-1732246802833 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:07,091 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:40:07,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78d8a6b1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:40:07,093 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5794b4b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:40:07,093 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:40:07,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76ba698c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:40:07,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@751df37f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir/,STOPPED} 2024-11-22T03:40:07,094 WARN [BP-620697003-172.17.0.3-1732246802833 heartbeating to localhost/127.0.0.1:45665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:40:07,094 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:40:07,094 WARN [BP-620697003-172.17.0.3-1732246802833 heartbeating to localhost/127.0.0.1:45665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-620697003-172.17.0.3-1732246802833 (Datanode Uuid 96ab1abc-1a8b-4ae6-93f4-4f06f1d63aa9) service to localhost/127.0.0.1:45665 2024-11-22T03:40:07,094 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:40:07,094 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data1/current/BP-620697003-172.17.0.3-1732246802833 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:07,094 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/cluster_048b44b4-a4d8-86d9-cee4-c7274e4e832a/data/data2/current/BP-620697003-172.17.0.3-1732246802833 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:40:07,095 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:40:07,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b3b93a7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:40:07,100 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e8c567b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:40:07,100 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:40:07,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1c6682{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:40:07,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cce16d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0e87d7a1-f698-d78b-8607-ecc851708b8f/hadoop.log.dir/,STOPPED} 2024-11-22T03:40:07,106 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:40:07,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:40:07,130 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 229) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:45665 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:45665 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:45665
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45665
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45665
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (566375312) connection to localhost/127.0.0.1:45665 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:45665
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45665
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=534 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 161) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6655 (was 6671)